//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
  "amdgpu-unroll-runtime-local",
  cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
  cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
  "amdgpu-use-legacy-divergence-analysis",
  cl::desc("Enable legacy divergence analysis for AMDGPU"),
  cl::init(false), cl::Hidden);

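// Returns true if \p Cond (looking through its operands, up to a small
// recursion depth) depends on a PHI node that belongs to \p L itself rather
// than to one of its subloops.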
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

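// Tune loop unrolling: start from the "amdgpu-unroll-threshold" function
// attribute and boost the threshold when the loop addresses a small private
// alloca or LDS, or branches on a PHI local to the loop, since unrolling then
// helps SROA, DS instruction combining, and divergence reduction.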
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
        continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable; most likely we will be unable to
        // combine it.
        // Do not unroll too-deep inner loops for local memory, to give a
        // chance to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if the GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

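// Both SGPRs and VGPRs are 32 bits wide; wider values simply occupy multiple
// consecutive registers, so 32 is reported for the (minimum) vector register
// width as well.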
unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

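// Cap the store vectorization factor so the combined access is at most 128
// bits (a dwordx4 access).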
unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  // Common to flat, global, local and region. Assume for unknown addrspace.
  return 128;
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

// FIXME: Really we would like to issue multiple 128-bit loads and stores per
// iteration. Should we report a larger size and let it legalize?
//
// FIXME: Should we use narrower types for local/region, or account for when
// unaligned access is legal?
//
// FIXME: This could use fine tuning and microbenchmarks.
Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                            unsigned SrcAddrSpace,
                                            unsigned DestAddrSpace,
                                            unsigned SrcAlign,
                                            unsigned DestAlign) const {
  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
  // hardware into byte accesses. If you assume all alignments are equally
  // probable, it's more efficient on average to use short accesses for this
  // case.
  if (MinAlign == 2)
    return Type::getInt16Ty(Context);

  // Not all subtargets have 128-bit DS instructions, and we currently don't
  // form them by default.
  if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
      DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
    return VectorType::get(Type::getInt32Ty(Context), 2);
  }

  // Global memory works best with 16-byte accesses. Private memory accesses
  // will also hit this path, although they'll be decomposed.
  return VectorType::get(Type::getInt32Ty(Context), 4);
}

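// Lower the residual bytes of a memcpy with the widest pieces the remaining
// size allows: i64 and i32 chunks (skipped when the minimum alignment is
// exactly 2), then i16 and i8 for the tail. For example, 11 remaining bytes
// with dword alignment lower to i64 + i16 + i8.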
void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
  SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
  unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
  unsigned SrcAlign, unsigned DestAlign) const {
  assert(RemainingBytes < 16);

  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  if (MinAlign != 2) {
    Type *I64Ty = Type::getInt64Ty(Context);
    while (RemainingBytes >= 8) {
      OpsOut.push_back(I64Ty);
      RemainingBytes -= 8;
    }

    Type *I32Ty = Type::getInt32Ty(Context);
    while (RemainingBytes >= 4) {
      OpsOut.push_back(I32Ty);
      RemainingBytes -= 4;
    }
  }

  Type *I16Ty = Type::getInt16Ty(Context);
  while (RemainingBytes >= 2) {
    OpsOut.push_back(I16Ty);
    RemainingBytes -= 2;
  }

  Type *I8Ty = Type::getInt8Ty(Context);
  while (RemainingBytes) {
    OpsOut.push_back(I8Ty);
    --RemainingBytes;
  }
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

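// Describe the DS atomic and ordered-count intrinsics as target memory
// intrinsics so generic analyses know their pointer operand, atomic ordering,
// and volatility.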
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

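// Cost of an arithmetic instruction after type legalization: the legalized
// type may be split into LT.first pieces covering NElts scalar elements, and
// each element is costed at the operation's rate class (full rate, quarter
// rate, or the 64-bit rate).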
int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Opd1Info,
                                       TTI::OperandValueKind Opd2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // 64-bit add/sub and logical ops are typically split into 2 VALU
      // instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                       Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

// Return true if there's a potential benefit from using v2f16 instructions for
// an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma: // TODO: fmuladd
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
    return true;
  default:
    return false;
  }
}

int GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  Type *RetTy = ICA.getReturnType();
  EVT OrigTy = TLI->getValueType(DL, RetTy);
  if (!OrigTy.isSimple()) {
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost();

  if (ST->has16BitInsts() && SLT == MVT::f16)
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost();
  if (ICA.getID() == Intrinsic::fma) {
    InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost()
                                   : getQuarterRateInstrCost();
  }

  return LT.first * NElts * InstRate;
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                    TTI::TargetCostKind CostKind) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                           bool IsPairwise,
                                           TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise, CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsPairwise, bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
  const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    Register AssignedReg;
    const TargetRegisterClass *RC;
    std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
      TRI, TC.ConstraintCode, TC.ConstraintVT);
    if (AssignedReg) {
      // FIXME: This is a workaround for getRegForInlineAsmConstraint
      // returning VS_32
      RC = TRI->getPhysRegClass(AssignedReg);
    }

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all invokes are a source of divergence as well.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

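// \returns true if \p V is known to be uniform across all lanes of a
// wavefront, e.g. readfirstlane/readlane results or the wave-wide mask
// produced by ballot/icmp/fcmp, which lives in SGPRs.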
bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_ballot:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we infer the
  // overall struct return to be divergent. We need to override that here when
  // the component being extracted is an SGPR.
  if (CI->isInlineAsm())
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

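// These intrinsics take a pointer operand whose address space
// InferAddressSpaces may be able to narrow from flat; report operand 0 as the
// rewritable pointer.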
bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

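// Rewrite the intrinsic to operate on \p NewV, a pointer in a known specific
// address space, instead of the flat pointer \p OldV. is_shared/is_private
// fold to a constant once the address space is known; the DS atomics are
// remangled to the new pointer type unless they are volatile.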
bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
  IntrinsicInst *II, Value *OldV, Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile->isZero())
      return false;
    Module *M = II->getParent()->getParent()->getParent();
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    II->replaceAllUsesWith(NewVal);
    II->eraseFromParent();
    return true;
  }
  default:
    return false;
  }
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *VT,
                                    int Index, VectorType *SubTp) {
  if (ST->hasVOP3PInsts()) {
    if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low half or high
      // half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, VT, Index, SubTp);
}

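// Inlining is only compatible when the callee's required subtarget features
// (ignoring the ones in InlineFeatureIgnoreList) are a subset of the caller's
// and their floating-point mode register defaults agree.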
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
  AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
  return CallerMode.isInlineCompatible(CalleeMode);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

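// Route IR-level user cost queries to the target-specific hooks above: vector
// insert/extract, intrinsic calls, shuffles, casts, and arithmetic get precise
// costs; everything else falls back to the base implementation.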
unsigned
GCNTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
                        TTI::TargetCostKind CostKind) {
  const Instruction *I = dyn_cast<Instruction>(U);
  if (!I)
    return BaseT::getUserCost(U, Operands, CostKind);

  // Estimate different operations to be optimized out
  switch (I->getOpcode()) {
  case Instruction::ExtractElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getOperand(0)->getType(), Idx);
  }
  case Instruction::InsertElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), I->getType(), Idx);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      IntrinsicCostAttributes CostAttrs(*II);
      return getIntrinsicInstrCost(CostAttrs, CostKind);
    } else {
      return BaseT::getUserCost(U, Operands, CostKind);
    }
  }
  case Instruction::ShuffleVector: {
    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    auto *Ty = cast<VectorType>(Shuffle->getType());
    auto *SrcTy = cast<VectorType>(Shuffle->getOperand(0)->getType());

    // TODO: Identify and add costs for insert subvector, etc.
    int SubIndex;
    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return getShuffleCost(TTI::SK_ExtractSubvector, SrcTy, SubIndex, Ty);

    if (Shuffle->changesLength())
      return BaseT::getUserCost(U, Operands, CostKind);

    if (Shuffle->isIdentity())
      return 0;

    if (Shuffle->isReverse())
      return getShuffleCost(TTI::SK_Reverse, Ty, 0, nullptr);

    if (Shuffle->isSelect())
      return getShuffleCost(TTI::SK_Select, Ty, 0, nullptr);

    if (Shuffle->isTranspose())
      return getShuffleCost(TTI::SK_Transpose, Ty, 0, nullptr);

    if (Shuffle->isZeroEltSplat())
      return getShuffleCost(TTI::SK_Broadcast, Ty, 0, nullptr);

    if (Shuffle->isSingleSource())
      return getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, nullptr);

    return getShuffleCost(TTI::SK_PermuteTwoSrc, Ty, 0, nullptr);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast: {
    return getCastInstrCost(I->getOpcode(), I->getType(),
                            I->getOperand(0)->getType(), CostKind, I);
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg: {
    return getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind,
                                  TTI::OK_AnyValue, TTI::OK_AnyValue,
                                  TTI::OP_None, TTI::OP_None, Operands, I);
  }
  default:
    break;
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
      AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
      (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
      AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode,
                                     TTI::TargetCostKind CostKind) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}