//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    for (const Instruction &I : *BB) {
      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
        continue;

      const Value *Ptr = GEP->getPointerOperand();
      const AllocaInst *Alloca =
          dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
      if (Alloca) {
        // We want to do whatever we can to limit the number of alloca
        // instructions that make it through to the code generator. Allocas
        // require us to use indirect addressing, which is slow and prone to
        // compiler bugs. If this loop does an address calculation on an
        // alloca ptr, then we want to use a higher than normal loop unroll
        // threshold. This will give SROA a better chance to eliminate these
        // allocas.
        //
        // Don't use the maximum allowed value here as it will make some
        // programs way too big.
        UP.Threshold = 800;
      }
    }
  }
}
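// Illustrative example (an added note, not from the original file; %buf and %i
// are placeholder names): a loop that does address arithmetic on an
// alloca-backed private array, e.g.
//
//   %buf = alloca [64 x i32]
//   loop:
//     %gep = getelementptr [64 x i32], [64 x i32]* %buf, i32 0, i32 %i
//     ...
//
// triggers the raised threshold of 800 above, making full unrolling more
// likely and giving SROA a better chance to replace %buf with registers.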
unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
  if (Vec)
    return 0;

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) {
  return Vector ? 0 : 32;
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Semi-arbitrary large amount.
  return 64;
}

int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {

  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but we do have legal
  // vector types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: {
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    if (SLT == MVT::i64) {
      // On 64-bit values these are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  }
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;

  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv accounts for most of
    // its cost, but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();

      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    // Assuming no fp32 denormals lowering.
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      assert(!ST->hasFP32Denormals() && "will change when supported");
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
      return LT.first * NElts * Cost;
    }

    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}
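// Illustrative cost walk-through (an added note; the figures simply restate
// the formulas above): a 64-bit integer multiply is modeled as 4 quarter-rate
// multiplies plus 4 full-rate instructions per legalized element, and an f64
// fdiv on SOUTHERN_ISLANDS pays 3 extra full-rate instructions for the
// hardware workaround. Types that must be split during legalization scale the
// per-element cost by LT.first.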
unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
                                          const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::not_intrinsic:
    // This means we have an intrinsic that isn't defined in
    // IntrinsicsAMDGPU.td
    break;

  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
    return true;
  }

  StringRef Name = I->getCalledFunction()->getName();
  switch (TII->lookupName((const char *)Name.bytes_begin(), Name.size())) {
  default:
    return false;
  case AMDGPUIntrinsic::SI_tid:
  case AMDGPUIntrinsic::SI_fs_interp:
    return true;
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  if (!AMDGPU::isShader(F->getCallingConv()))
    return true;

  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
  if (F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::InReg) ||
      F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::ByVal))
    return true;

  // Everything else is in VGPRs.
  return false;
}
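// Illustrative example (an added note, not from the original file; the shader
// and argument names are hypothetical): for a pixel shader such as
//
//   define amdgpu_ps float @f(float inreg %s, float %v)
//
// %s is treated as a uniform SGPR input because of the 'inreg' attribute,
// while %v lives in a VGPR and is therefore considered a source of divergence.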
///
/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {

  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
  }

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}
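// Illustrative example (an added note, not from the original file; names are
// hypothetical): given
//
//   %a = load i32, i32* %scratch.ptr              ; private/scratch pointer
//   %b = load i32, i32 addrspace(1)* %global.ptr  ; global pointer
//
// %a is reported as a source of divergence because each lane has its own
// scratch memory, while %b is not a source of divergence by itself; it is only
// divergent if its address operand already is.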