//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
  "amdgpu-unroll-runtime-local",
  cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
  cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
  "amdgpu-use-legacy-divergence-analysis",
  cl::desc("Enable legacy divergence analysis for AMDGPU"),
  cl::init(false), cl::Hidden);

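// Returns true if \p Cond, followed through its operands up to a small
// recursion depth, depends on a PHI node that does not belong to any subloop
// of \p L (typically a PHI of \p L's own header).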
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit into registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
        continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and the registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable or argument; most likely we will be
        // unable to combine such accesses.
        // Do not unroll deeply nested inner loops for local memory, to give an
        // outer loop a chance to be unrolled for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca pointer, then we want to use a higher-than-normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory, to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here, as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

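// Clamp the load vectorization factor so the resulting vector does not exceed
// 128 bits when the element type is narrower than 32 bits; wider elements keep
// the requested factor. getStoreVectorFactor below clamps stores to 128 bits
// unconditionally.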
unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element sizes of less than 32 bits?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

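// Widest vector access, in bits, that the load/store vectorizer should form
// for a given address space: 512 bits for global-like spaces, 128 bits for
// flat/LDS/region, and the subtarget's maximum private element size for
// scratch.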
unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
      AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 128;

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  llvm_unreachable("unhandled address space");
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

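// Describe the DS and flat atomic intrinsics handled below as read-write
// memory accesses whose ordering and volatility are decoded from constant
// arguments; if those operands are not constants, or the ordering is out of
// range, no information is reported.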
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::OperandValueKind Opd1Info,
                                       TTI::OperandValueKind Opd2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
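    // An i64 multiply is costed as four quarter-rate 32-bit multiplies plus
    // four full-rate instructions (roughly the mul_lo/mul_hi pieces and the
    // add/carry chain of the expansion). An i32 multiply costs one
    // quarter-rate instruction per element, and i16 multiplies are assumed to
    // pack two elements per instruction on subtargets with 16-bit insts.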
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

// Return true if there's a potential benefit from using v2f16 instructions for
// an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma: // TODO: fmuladd
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
    return true;
  default:
    return false;
  }
}

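// Shared implementation for the value-based and type-based
// getIntrinsicInstrCost overloads below. Only intrinsics that benefit from
// packed 16-bit math are costed here; everything else falls through to the
// generic implementation.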
template <typename T>
int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<T *> Args, FastMathFlags FMF,
                                      unsigned VF, const Instruction *I) {
  if (!intrinsicHasPackedVectorBenefit(ID))
    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF, I);

  EVT OrigTy = TLI->getValueType(DL, RetTy);
  if (!OrigTy.isSimple()) {
    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF, I);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost();

  if (ST->has16BitInsts() && SLT == MVT::f16)
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost();
  if (ID == Intrinsic::fma) {
    InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost()
                                   : getQuarterRateInstrCost();
  }

  return LT.first * NElts * InstRate;
}

int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<Value *> Args, FastMathFlags FMF,
                                      unsigned VF, const Instruction *I) {
  return getIntrinsicInstrCost<Value>(ID, RetTy, Args, FMF, VF, I);
}

int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
                                      unsigned ScalarizationCostPassed,
                                      const Instruction *I) {
  return getIntrinsicInstrCost<Type>(ID, RetTy, Tys, FMF,
                                     ScalarizationCostPassed, I);
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                              bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                          bool IsPairwise,
                                          bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

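// Returns true if argument \p A is known to be passed in an SGPR, i.e. it is
// uniform across the wavefront and therefore not a source of divergence.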
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
  const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  ImmutableCallSite CS(CI);
  TargetLowering::AsmOperandInfoVector TargetConstraints
    = TLI->ParseConstraints(DL, ST->getRegisterInfo(), CS);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    Register AssignedReg;
    const TargetRegisterClass *RC;
    std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
      TRI, TC.ConstraintCode, TC.ConstraintVT);
    if (AssignedReg) {
      // FIXME: This is a workaround for getRegForInlineAsmConstraint
      // returning VS_32
      RC = TRI->getPhysRegClass(AssignedReg);
    }

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // its original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (isa<InlineAsm>(CI->getCalledValue()))
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all function calls are a source of divergence.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (isa<InlineAsm>(CI->getCalledValue()))
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we inferred
  // divergent for the overall struct return. We need to override it in the
  // case we're extracting an SGPR component here.
  if (isa<InlineAsm>(CI->getCalledValue()))
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

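// Report which operands of the given intrinsic are flat pointers, so that
// address space inference can try to rewrite them to a more specific address
// space; for the intrinsics handled here that is always operand 0.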
bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

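// Rewrite the given intrinsic to operate directly on \p NewV's address space:
// DS-style atomics are re-declared with the new pointer type (unless marked
// volatile, in which case we bail), and amdgcn.is.shared / amdgcn.is.private
// fold to a constant once the address space is known.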
bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
  IntrinsicInst *II, Value *OldV, Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile->isZero())
      return false;
    Module *M = II->getParent()->getParent()->getParent();
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    II->replaceAllUsesWith(NewVal);
    II->eraseFromParent();
    return true;
  }
  default:
    return false;
  }
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                       Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low or high half
      // of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

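// Inlining is allowed only when the callee's subtarget features (ignoring
// those on InlineFeatureIgnoreList) are a subset of the caller's, and the
// callee's FP mode register defaults are compatible with the caller's.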
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  AMDGPU::SIModeRegisterDefaults CallerMode(*Caller, *CallerST);
  AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee, *CalleeST);
  return CallerMode.isInlineCompatible(CalleeMode);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

unsigned GCNTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  const Instruction *I = dyn_cast<Instruction>(U);
  if (!I)
    return BaseT::getUserCost(U, Operands);

  // Estimate different operations to be optimized out
  switch (I->getOpcode()) {
  case Instruction::ExtractElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getOperand(0)->getType(), Idx);
  }
  case Instruction::InsertElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getType(), Idx);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      SmallVector<Value *, 4> Args(II->arg_operands());
      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(II))
        FMF = FPMO->getFastMathFlags();
      return getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(), Args,
                                   FMF, 1, II);
    } else {
      return BaseT::getUserCost(U, Operands);
    }
  }
  case Instruction::ShuffleVector: {
    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    Type *Ty = Shuffle->getType();
    Type *SrcTy = Shuffle->getOperand(0)->getType();

    // TODO: Identify and add costs for insert subvector, etc.
    int SubIndex;
    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return getShuffleCost(TTI::SK_ExtractSubvector, SrcTy, SubIndex, Ty);

    if (Shuffle->changesLength())
      return BaseT::getUserCost(U, Operands);

    if (Shuffle->isIdentity())
      return 0;

    if (Shuffle->isReverse())
      return getShuffleCost(TTI::SK_Reverse, Ty, 0, nullptr);

    if (Shuffle->isSelect())
      return getShuffleCost(TTI::SK_Select, Ty, 0, nullptr);

    if (Shuffle->isTranspose())
      return getShuffleCost(TTI::SK_Transpose, Ty, 0, nullptr);

    if (Shuffle->isZeroEltSplat())
      return getShuffleCost(TTI::SK_Broadcast, Ty, 0, nullptr);

    if (Shuffle->isSingleSource())
      return getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, nullptr);

    return getShuffleCost(TTI::SK_PermuteTwoSrc, Ty, 0, nullptr);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast: {
    return getCastInstrCost(I->getOpcode(), I->getType(),
                            I->getOperand(0)->getType(), I);
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg: {
    return getArithmeticInstrCost(I->getOpcode(), I->getType(),
                                  TTI::OK_AnyValue, TTI::OK_AnyValue,
                                  TTI::OP_None, TTI::OP_None, Operands, I);
  }
  default:
    break;
  }

  return BaseT::getUserCost(U, Operands);
}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
      AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
      (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
      AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}