//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  AMDGPUAS AS;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  AS = AMDGPU::getAMDGPUAS(*F.getParent());

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  //   #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  //   #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  //   #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  //   #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t;
  //
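  // In that layout, workgroup_size_x/y/z are the 16-bit fields at byte
  // offsets 4, 6, and 8, so the two 32-bit loads below (at i32 indices 1 and
  // 2) fetch x and y packed together, and z in the low half of the next word.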
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AS.CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Checks whether Inst is an instruction that the vector promotion below knows
// how to rewrite.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently we only handle loads whose pointer operand is a GEP.
    LoadInst *LI = cast<LoadInst>(Inst);
    return isa<GetElementPtrInst>(LI->getPointerOperand()) &&
           !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // The use must be the stored pointer operand, not a stored value, and
    // since the IR should be in canonical form, the user should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && !SI->isVolatile();
  }
  default:
    return false;
  }
}
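
// For illustration, with hypothetical IR (not taken from a specific test), a
// promotable alloca and a store through one of its GEPs:
//
//   %alloca = alloca [4 x float]
//   %gep = getelementptr inbounds [4 x float], [4 x float]* %alloca,
//                                 i32 0, i32 %idx
//   store float %v, float* %gep
//
// is rewritten so every access goes through the whole vector:
//
//   %cast = bitcast [4 x float]* %alloca to <4 x float>*
//   %vec = load <4 x float>, <4 x float>* %cast
//   %newvec = insertelement <4 x float> %vec, float %v, i32 %idx
//   store <4 x float> %newvec, <4 x float>* %cast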
static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getElementType()->isArrayTy() ||
      AllocaTy->getNumElements() > 4 ||
      AllocaTy->getNumElements() < 2) {
    DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                   << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << "  Converting alloca to vector "
               << *AllocaTy << " -> " << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
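
// For example (hypothetical IR), given
//
//   %cmp = icmp eq i32* %ptr.from.alloca, %other
//
// the compare can only survive promotion if %other is a null pointer constant
// or derives from the same alloca, since both operands must end up in the
// same address space.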
bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs()
          << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
    Value *BaseAlloca,
    Value *Val,
    std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if Val is the value being stored rather than the store's
      // pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived from
    // a pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}
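
// As a rough, hypothetical example: on a subtarget with 64 KB of LDS, if
// module globals already used from F consume 32 KB, the budget computed below
// is the largest LDS size that does not lower the resulting occupancy
// estimate any further, and allocas are promoted only while total usage stays
// within that budget.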
bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
               << " bytes of LDS\n"
               << "  Rounding size to " << MaxSizeWithWaveCount
               << " with a maximum occupancy of " << MaxOccupancy << '\n'
               << " and " << (LocalMemLimit - CurrentLocalMemUsage)
               << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.
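
  // Otherwise, fall back to LDS: give every workitem in the workgroup its own
  // copy by replacing the alloca with a [WorkGroupSize x AllocaTy] global in
  // the local address space, indexed by the linearized workitem id
  // (TIdX * TCntY + TIdY) * TCntZ + TIdZ computed below.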
  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    DEBUG(dbgs()
          << "  Promotion to LDS is not supported for this calling convention\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST =
      TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
                 << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();
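
  // The collected pointer uses still carry private-address-space pointer
  // types. Rewrite them in place: mutate derived pointer types to the local
  // address space, replace null pointer constants with nulls of the new
  // address space, and recreate the promotable intrinsics with the updated
  // pointer types.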
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value will be corrected on its own; we don't want to
      // touch the users here.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}