//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement "
           "inside loop"),
  cl::init(150), cl::Hidden);

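// Return true if \p Cond is an instruction inside loop \p L that depends,
// directly or through a short chain of operands, on a PHI node belonging to
// \p L itself rather than to one of its subloops.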
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I || !L->contains(I))
    return false;

  for (const Value *V : I->operand_values()) {
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

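// As an illustration, a hypothetical loop of the shape targeted below
// (IR abridged):
//
//   %a = alloca [64 x i32]
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %p = getelementptr [64 x i32], [64 x i32]* %a, i32 0, i32 %i
//     store i32 %v, i32* %p
//     ...
//
// Fully unrolling such a loop turns every GEP index into a constant, which
// gives SROA a chance to eliminate the alloca entirely.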
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
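  // (256 - 16) VGPRs of 4 bytes each = 960 bytes.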
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  AMDGPUAS ASST = ST->getAMDGPUAS();
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                         << " for loop:\n" << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == ASST.PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == ASST.LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == ASST.PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == ASST.LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not of a variable (a global or an argument); most likely we will
        // be unable to combine the accesses.
        // Also do not unroll deep inner loops for local memory, to give outer
        // loops, which matter more, a chance to be unrolled.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      DEBUG(dbgs() << "Set unroll threshold " << Threshold << " for loop:\n"
                   << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned AMDGPUTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
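  // E.g. with 256 VGPRs this reports 256 >> 3 = 32 registers.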
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element sizes less than 32 bits?
    return 128 / LoadSize;

  return VF;
}

unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  AMDGPUAS AS = ST->getAMDGPUAS();
  if (AddrSpace == AS.GLOBAL_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return 128;
    return 512;
  }

  if (AddrSpace == AS.FLAT_ADDRESS)
    return 128;

  if (AddrSpace == AS.LOCAL_ADDRESS ||
      AddrSpace == AS.REGION_ADDRESS)
    return ST->useDS128() ? 128 : 64;

  if (AddrSpace == AS.PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
      (AddrSpace == AS.PARAM_D_ADDRESS ||
       AddrSpace == AS.PARAM_I_ADDRESS ||
       (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
        AddrSpace <= AS.CONSTANT_BUFFER_15)))
    return 128;

  llvm_unreachable("unhandled address space");
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to
  // decompose them later if they may access private memory. We don't have
  // enough context here, and legalization can handle it.
  if (AddrSpace == ST->getAMDGPUAS().PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

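// Model the ds atomic intrinsics handled below as memory operations so that
// generic passes querying TTI can reason about them.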
bool AMDGPUTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
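    // For these intrinsics, operand 2 is the atomic ordering and operand 4 is
    // the volatile flag; both must be compile-time constants.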
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal types, we
  // need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // These 64-bit operations are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
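      // Rough approximation of the i64 multiply expansion: about four 32-bit
      // multiplies for the partial products plus four full rate instructions
      // to combine them. The exact lowering may differ.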
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
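      // Approximate cost of the f64 division expansion (reciprocal estimate
      // plus refinement and fixup steps), counted as four 64-bit and seven
      // quarter-rate instructions.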
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // its original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}

bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  return false;
}

unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                       int Index, Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

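// Inlining is compatible when the callee requires no subtarget features the
// caller lacks, ignoring features on the InlineFeatureIgnoreList.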
bool AMDGPUTTIImpl::areInlineCompatible(const Function *Caller,
                                        const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}