//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
    "amdgpu-unroll-threshold-private",
    cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
    cl::init(2700), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
    "amdgpu-unroll-threshold-local",
    cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
    cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
    "amdgpu-unroll-threshold-if",
    cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
    cl::init(150), cl::Hidden);

static cl::opt<bool> UnrollRuntimeLocal(
    "amdgpu-unroll-runtime-local",
    cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> UseLegacyDA(
    "amdgpu-use-legacy-divergence-analysis",
    cl::desc("Enable legacy divergence analysis for AMDGPU"),
    cl::init(false), cl::Hidden);

static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
    "amdgpu-unroll-max-block-to-analyze",
    cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
    cl::init(32), cl::Hidden);

static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
            return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth + 1))
      return true;
  }
  return false;
}

AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      TargetTriple(TM->getTargetTriple()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()) {}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
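  // The base threshold comes from a per-function IR string attribute; e.g. a
  // kernel carrying "amdgpu-unroll-threshold"="700" (the value here is only
  // illustrative) starts from 700 instead of the default 300.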
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;

  // If this loop has the amdgpu.loop.unroll.threshold metadata we will use the
  // provided threshold value as the default for Threshold.
  if (MDNode *LoopUnrollThreshold =
          findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
    if (LoopUnrollThreshold->getNumOperands() == 2) {
      ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
          LoopUnrollThreshold->getOperand(1));
      if (MetaThresholdValue) {
        // We will also use the supplied value for PartialThreshold for now.
        // We may introduce additional metadata if it becomes necessary in the
        // future.
        UP.Threshold = MetaThresholdValue->getSExtValue();
        UP.PartialThreshold = UP.Threshold;
        ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
        ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
      }
    }
  }

  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
          return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing not to
        // a variable; most likely we will be unable to combine it.
        // Do not unroll too deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
              return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }

    // If we got a GEP in a small BB from an inner loop then increase the max
    // trip count to analyze for a better cost estimation during unrolling.
    if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
      UP.MaxIterationsCountToAnalyze = 32;
  }
}

void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
    AMDGPU::FeatureUnalignedAccessMode,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // The default assumption needs to be that ECC is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
    AMDGPU::FeatureSRAMECC,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};

GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F),
      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
      MaxVGPRs(ST->getMaxNumVGPRs(
          std::max(ST->getWavesPerEU(F).first,
                   ST->getWavesPerEUForWorkGroup(
                       ST->getFlatWorkGroupSizes(F).second)))) {
  AMDGPU::SIModeRegisterDefaults Mode(F);
  HasFP32Denormals = Mode.allFP32Denormals();
  HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return MaxVGPRs;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  const TargetRegisterClass *RC = TRI->getRegClass(RCID);
  unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
  return getHardwareNumberOfRegisters(false) / NumVGPRs;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
  if (Opcode == Instruction::Load || Opcode == Instruction::Store)
    return 32 * 4 / ElemWidth;
  return (ElemWidth == 16 && ST->has16BitInsts()) ? 2 : 1;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  // Common to flat, global, local and region. Assume for unknown addrspace.
  return 128;
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

// FIXME: Really we would like to issue multiple 128-bit loads and stores per
// iteration. Should we report a larger size and let it legalize?
//
// FIXME: Should we use narrower types for local/region, or account for when
// unaligned access is legal?
//
// FIXME: This could use fine tuning and microbenchmarks.
Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                            unsigned SrcAddrSpace,
                                            unsigned DestAddrSpace,
                                            unsigned SrcAlign,
                                            unsigned DestAlign) const {
  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
  // hardware into byte accesses. If you assume all alignments are equally
  // probable, it's more efficient on average to use short accesses for this
  // case.
  if (MinAlign == 2)
    return Type::getInt16Ty(Context);

  // Not all subtargets have 128-bit DS instructions, and we currently don't
  // form them by default.
  if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
      DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
    return FixedVectorType::get(Type::getInt32Ty(Context), 2);
  }

  // Global memory works best with 16-byte accesses. Private memory will also
  // hit this, although they'll be decomposed.
  return FixedVectorType::get(Type::getInt32Ty(Context), 4);
}

void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign) const {
  assert(RemainingBytes < 16);
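  // For example, 13 residual bytes with alignment greater than 2 are emitted
  // below as one i64, one i32 and one i8 piece.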
  unsigned MinAlign = std::min(SrcAlign, DestAlign);

  if (MinAlign != 2) {
    Type *I64Ty = Type::getInt64Ty(Context);
    while (RemainingBytes >= 8) {
      OpsOut.push_back(I64Ty);
      RemainingBytes -= 8;
    }

    Type *I32Ty = Type::getInt32Ty(Context);
    while (RemainingBytes >= 4) {
      OpsOut.push_back(I32Ty);
      RemainingBytes -= 4;
    }
  }

  Type *I16Ty = Type::getInt16Ty(Context);
  while (RemainingBytes >= 2) {
    OpsOut.push_back(I16Ty);
    RemainingBytes -= 2;
  }

  Type *I8Ty = Type::getInt8Ty(Context);
  while (RemainingBytes) {
    OpsOut.push_back(I8Ty);
    --RemainingBytes;
  }
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Opd1Info,
                                       TTI::OperandValueKind Opd2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    // FIXME: We're having to query the throughput cost so that the basic
    // implementation tries to generate legalization and scalarization costs.
    // Maybe we could hoist the scalarization code here?
    if (CostKind != TTI::TCK_CodeSize)
      return BaseT::getArithmeticInstrCost(Opcode, Ty, TTI::TCK_RecipThroughput,
                                           Opd1Info, Opd2Info, Opd1PropInfo,
                                           Opd2PropInfo, Args, CxtI);
    // Scalarization

    // Check if any of the operands are vector operands.
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
      unsigned Cost = getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
          Opd1PropInfo, Opd2PropInfo, Args, CxtI);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(VTy, Args) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal types, we
  // need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost(CostKind) * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FMUL:
    // Check for a possible fusion of {fadd|fsub}(a, fmul(b, c)) and return a
    // zero cost for the fmul(b, c), assuming the fadd|fsub will be charged the
    // estimated cost of the whole fused operation.
    if (CxtI && CxtI->hasOneUse())
      if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
        const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
        if (OPC == ISD::FADD || OPC == ISD::FSUB) {
          if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
            return TargetTransformInfo::TCC_Free;
          if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
            return TargetTransformInfo::TCC_Free;

          // Assume all types may be fused with contract/unsafe flags.
          const TargetOptions &Options = TLI->getTargetMachine().Options;
          if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
              Options.UnsafeFPMath ||
              (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
            return TargetTransformInfo::TCC_Free;
        }
      }
    LLVM_FALLTHROUGH;
  case ISD::FADD:
  case ISD::FSUB:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost(CostKind);

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 7 * get64BitInstrCost(CostKind) +
                 getQuarterRateInstrCost(CostKind) +
                 3 * getHalfRateInstrCost(CostKind);
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }
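    // A reciprocal (1.0 / x) can usually be lowered to a single rcp
    // instruction when denormal results are not required, so it is charged at
    // the quarter rate below.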
    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost =
          4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      // 4 more v_cvt_* insts without f16 insts support
      int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
                 1 * getQuarterRateInstrCost(CostKind);

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free, each element will
    // cost one additional instruction.
    return TLI->isFNegFree(SLT) ? 0 : NElts;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

// Return true if there's a potential benefit from using v2f16/v2i16
// instructions for an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma: // TODO: fmuladd
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

int GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::fabs)
    return 0;

  if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  Type *RetTy = ICA.getReturnType();
  EVT OrigTy = TLI->getValueType(DL, RetTy);
  if (!OrigTy.isSimple()) {
    if (CostKind != TTI::TCK_CodeSize)
      return BaseT::getIntrinsicInstrCost(ICA, CostKind);

    // TODO: Combine these two logic paths.
    if (ICA.isTypeBasedOnly())
      return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

    Type *RetTy = ICA.getReturnType();
    unsigned VF = ICA.getVectorFactor().getFixedValue();
    unsigned RetVF =
        (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
                             : 1);
    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
    const IntrinsicInst *I = ICA.getInst();
    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
    FastMathFlags FMF = ICA.getFlags();
    // Assume that we need to scalarize this intrinsic.
    SmallVector<Type *, 4> Types;
    for (const Value *Op : Args) {
      Type *OpTy = Op->getType();
      assert(VF == 1 || !OpTy->isVectorTy());
      Types.push_back(VF == 1 ? OpTy : FixedVectorType::get(OpTy, VF));
    }

    if (VF > 1 && !RetTy->isVoidTy())
      RetTy = FixedVectorType::get(RetTy, VF);

    // Compute the scalarization overhead based on Args for a vector
    // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
    // CostModel will pass a vector RetTy and VF is 1.
    unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
    if (RetVF > 1 || VF > 1) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy())
        ScalarizationCost +=
            getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
      ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
    }

    IntrinsicCostAttributes Attrs(ICA.getID(), RetTy, Types, FMF,
                                  ScalarizationCost, I);
    return getIntrinsicInstrCost(Attrs, CostKind);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if (SLT == MVT::f64)
    return LT.first * NElts * get64BitInstrCost(CostKind);

  if (ST->has16BitInsts() && SLT == MVT::f16)
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost(CostKind);
  if (ICA.getID() == Intrinsic::fma) {
    InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
                                   : getQuarterRateInstrCost(CostKind);
  }

  return LT.first * NElts * InstRate;
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                    TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return Opcode == Instruction::PHI ? 0 : 1;

  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                           bool IsPairwise,
                                           TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise, CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsPairwise, bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost(CostKind);
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
    const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getModule()->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    Register AssignedReg;
    const TargetRegisterClass *RC;
    std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
        TRI, TC.ConstraintCode, TC.ConstraintVT);
    if (AssignedReg) {
      // FIXME: This is a workaround for getRegForInlineAsmConstraint
      // returning VS_32
      RC = TRI->getPhysRegClass(AssignedReg);
    }

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

/// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
  return !UseLegacyDA;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !AMDGPU::isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all function calls are a source of divergence.
  if (isa<InvokeInst>(V))
    return true;

  return false;
}

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
    case Intrinsic::amdgcn_ballot:
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we inferred
  // divergent for the overall struct return. We need to override it in the
  // case we're extracting an SGPR component here.
  if (CI->isInlineAsm())
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                    Value *OldV,
                                                    Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile->isZero())
      return nullptr;
    Module *M = II->getParent()->getParent()->getParent();
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return II;
  }
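  // An is.shared/is.private query folds to a constant once the pointer's
  // address space is known, e.g. an amdgcn.is.shared call whose operand has
  // been rewritten to an addrspace(3) pointer simply becomes 'true'.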
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    return NewVal;
  }
  case Intrinsic::ptrmask: {
    unsigned OldAS = OldV->getType()->getPointerAddressSpace();
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    Value *MaskOp = II->getArgOperand(1);
    Type *MaskTy = MaskOp->getType();

    bool DoTruncate = false;

    const GCNTargetMachine &TM =
        static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
    if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
      // All valid 64-bit to 32-bit casts work by chopping off the high
      // bits. Any masking only clearing the low bits will also apply in the new
      // address space.
      if (DL.getPointerSizeInBits(OldAS) != 64 ||
          DL.getPointerSizeInBits(NewAS) != 32)
        return nullptr;

      // TODO: Do we need to thread more context in here?
      KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
      if (Known.countMinLeadingOnes() < 32)
        return nullptr;

      DoTruncate = true;
    }

    IRBuilder<> B(II);
    if (DoTruncate) {
      MaskTy = B.getInt32Ty();
      MaskOp = B.CreateTrunc(MaskOp, MaskTy);
    }

    return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
                             {NewV, MaskOp});
  }
  default:
    return nullptr;
  }
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *VT,
                                    int Index, VectorType *SubTp) {
  if (ST->hasVOP3PInsts()) {
    if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low half or high
      // half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, VT, Index, SubTp);
}

bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
      = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
      = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
  AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
  return CallerMode.isInlineCompatible(CalleeMode);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  CommonTTI.getPeelingPreferences(L, SE, PP);
}

int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
  return ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
                                : getQuarterRateInstrCost(CostKind);
}

R600TTIImpl::R600TTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const R600Subtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F) {}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
       AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
       (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
        AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               Align Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode,
                                     TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return Opcode == Instruction::PHI ? 0 : 1;

  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
Inserts are 1237 // considered free because we don't want to have any cost for scalarizing 1238 // operations, and we don't have to copy into a different register class. 1239 1240 // Dynamic indexing isn't free and is best avoided. 1241 return Index == ~0u ? 2 : 0; 1242 } 1243 default: 1244 return BaseT::getVectorInstrCost(Opcode, ValTy, Index); 1245 } 1246 } 1247 1248 void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, 1249 TTI::UnrollingPreferences &UP) { 1250 CommonTTI.getUnrollingPreferences(L, SE, UP); 1251 } 1252 1253 void R600TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, 1254 TTI::PeelingPreferences &PP) { 1255 CommonTTI.getPeelingPreferences(L, SE, PP); 1256 } 1257