//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
  "amdgpu-unroll-runtime-local",
  cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
  cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
  "amdgpu-use-legacy-divergence-analysis",
  cl::desc("Enable legacy divergence analysis for AMDGPU"),
  cl::init(false), cl::Hidden);

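// The cl::opt knobs above can be overridden on the command line of any tool
// that links in the AMDGPU backend, e.g. (a sketch using the option names
// registered above):
//   llc -mtriple=amdgcn -amdgpu-unroll-threshold-private=4000 in.ll

/// Returns true if \p Cond (transitively, up to depth 10) depends on a PHI
/// that is defined in \p L itself rather than in one of its subloops.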
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
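  // With 256 32-bit VGPRs and 16 of them held in reserve, that works out to
  // (256 - 16) * 4 = 960 bytes of alloca that can still hope to be promoted.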
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
        continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and the registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
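          // Skip branches with a loop-exiting successor; the bonus targets
          // if regions that stay inside the loop body.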
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based directly on a variable; most likely we will be unable
        // to combine such accesses.
        // Do not unroll too-deep inner loops for local memory, to give outer
        // loops a chance to be unrolled for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca pointer, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
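  // e.g. 16 x i16 loads: VecRegBitWidth = 256 exceeds the 128-bit cap for
  // sub-32-bit elements, so the factor is clamped to 128 / 16 = 8.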
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element sizes of less than 32 bits?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
      AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 128;

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  llvm_unreachable("unhandled address space");
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
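    // For these DS intrinsics, operand 2 carries the memory ordering and
    // operand 4 the volatile flag.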
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::OperandValueKind Opd1Info,
                                       TTI::OperandValueKind Opd2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or, and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
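      // A 64-bit multiply decomposes into 32-bit multiplies (quarter rate)
      // plus the adds/carries stitching them together (full rate); the
      // operand counts below mirror that expansion.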
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it accounts for
    // most of the cost, but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
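      // This roughly models the div_scale / div_fmas / div_fixup expansion,
      // with the reciprocal as the single quarter-rate instruction (an
      // informal reading of the lowering, not an exact instruction count).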

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

template <typename T>
int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<T *> Args,
                                      FastMathFlags FMF, unsigned VF) {
  if (ID != Intrinsic::fma)
    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);

  EVT OrigTy = TLI->getValueType(DL, RetTy);
  if (!OrigTy.isSimple()) {
    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost();

  if (ST->has16BitInsts() && SLT == MVT::f16)
    NElts = (NElts + 1) / 2;

  return LT.first * NElts * (ST->hasFastFMAF32() ? getHalfRateInstrCost()
                                                 : getQuarterRateInstrCost());
}

int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<Value*> Args, FastMathFlags FMF,
                                      unsigned VF) {
  return getIntrinsicInstrCost<Value>(ID, RetTy, Args, FMF, VF);
}

int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
                                      unsigned ScalarizationCostPassed) {
  return getIntrinsicInstrCost<Type>(ID, RetTy, Tys, FMF,
                                     ScalarizationCostPassed);
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                              bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                          bool IsPairwise,
                                          bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
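      // A 16-bit element at index 0 lives in the low half of a 32-bit
      // register, so accessing it costs no extra instruction.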
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// it only queries a specific result index, for asm that returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
  const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  ImmutableCallSite CS(CI);
  TargetLowering::AsmOperandInfoVector TargetConstraints
    = TLI->ParseConstraints(DL, ST->getRegisterInfo(), CS);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    Register AssignedReg;
    const TargetRegisterClass *RC;
    std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
      TRI, TC.ConstraintCode, TC.ConstraintVT);
    if (AssignedReg) {
      // FIXME: This is a workaround for getRegForInlineAsmConstraint
      // returning VS_32
      RC = TRI->getPhysRegClass(AssignedReg);
    }

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (isa<InlineAsm>(CI->getCalledValue()))
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Likewise, assume all invokes are a source of divergence.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (isa<InlineAsm>(CI->getCalledValue()))
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, the overall
  // struct return was inferred to be divergent. We need to override that in
  // the case where we are extracting an SGPR component here.
  if (isa<InlineAsm>(CI->getCalledValue()))
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
  IntrinsicInst *II, Value *OldV, Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile->isZero())
      return false;
    Module *M = II->getParent()->getParent()->getParent();
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
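    // Once the operand's address space is known, the is_shared/is_private
    // query folds to a compile-time constant.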
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    II->replaceAllUsesWith(NewVal);
    II->eraseFromParent();
    return true;
  }
  default:
    return false;
  }
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                       Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
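  // Inlining is only safe when every feature the callee relies on is also
  // present in the caller, i.e. the callee's bits are a subset of the
  // caller's.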
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  AMDGPU::SIModeRegisterDefaults CallerMode(*Caller, *CallerST);
  AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee, *CalleeST);
  return CallerMode.isInlineCompatible(CalleeMode);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

unsigned GCNTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  const Instruction *I = dyn_cast<Instruction>(U);
  if (!I)
    return BaseT::getUserCost(U, Operands);

  // Estimate the cost of operations that are likely to be optimized out.
  switch (I->getOpcode()) {
  case Instruction::ExtractElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    unsigned Idx = -1;
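    // A non-constant index leaves Idx at ~0u, which getVectorInstrCost above
    // treats as dynamic indexing.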
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getOperand(0)->getType(), Idx);
  }
  case Instruction::InsertElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getType(), Idx);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      SmallVector<Value *, 4> Args(II->arg_operands());
      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(II))
        FMF = FPMO->getFastMathFlags();
      return getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(), Args,
                                   FMF);
    } else {
      return BaseT::getUserCost(U, Operands);
    }
  }
  case Instruction::ShuffleVector: {
    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    Type *Ty = Shuffle->getType();
    Type *SrcTy = Shuffle->getOperand(0)->getType();

    // TODO: Identify and add costs for insert subvector, etc.
    int SubIndex;
    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return getShuffleCost(TTI::SK_ExtractSubvector, SrcTy, SubIndex, Ty);

    if (Shuffle->changesLength())
      return BaseT::getUserCost(U, Operands);

    if (Shuffle->isIdentity())
      return 0;

    if (Shuffle->isReverse())
      return getShuffleCost(TTI::SK_Reverse, Ty, 0, nullptr);

    if (Shuffle->isSelect())
      return getShuffleCost(TTI::SK_Select, Ty, 0, nullptr);

    if (Shuffle->isTranspose())
      return getShuffleCost(TTI::SK_Transpose, Ty, 0, nullptr);

    if (Shuffle->isZeroEltSplat())
      return getShuffleCost(TTI::SK_Broadcast, Ty, 0, nullptr);

    if (Shuffle->isSingleSource())
      return getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, nullptr);

    return getShuffleCost(TTI::SK_PermuteTwoSrc, Ty, 0, nullptr);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast: {
    return getCastInstrCost(I->getOpcode(), I->getType(),
                            I->getOperand(0)->getType(), I);
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg: {
    return getArithmeticInstrCost(I->getOpcode(), I->getType(),
                                  TTI::OK_AnyValue, TTI::OK_AnyValue,
                                  TTI::OP_None, TTI::OP_None, Operands, I);
  }
  default:
    break;
  }

  return BaseT::getUserCost(U, Operands);
}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if (AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
      AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
      (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
       AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}