//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// migrating them to the local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod;
  const DataLayout *DL;
  MDNode *MaxWorkGroupSizeRange;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit;
  uint32_t CurrentLocalMemUsage;

  bool IsAMDGCN;
  bool IsAMDHSA;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

public:
  static char ID;

  AMDGPUPromoteAlloca(const TargetMachine *TM_ = nullptr) :
    FunctionPass(ID),
    TM(TM_),
    Mod(nullptr),
    DL(nullptr),
    MaxWorkGroupSizeRange(nullptr),
    LocalMemLimit(0),
    CurrentLocalMemUsage(0),
    IsAMDGCN(false),
    IsAMDHSA(false) { }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  const char *getPassName() const override {
    return "AMDGPU Promote Alloca";
  }

  void handleAlloca(AllocaInst &I);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_TM_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                   "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  if (!TM)
    return false;

  Mod = &M;
  DL = &Mod->getDataLayout();

  // The maximum workitem id.
  //
  // FIXME: Should get as subtarget property. Usually runtime enforced max is
  // 256.
  MDBuilder MDB(Mod->getContext());
  MaxWorkGroupSizeRange = MDB.createRange(APInt(32, 0), APInt(32, 2048));

  const Triple &TT = TM->getTargetTriple();

  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promotion to "
                      "local memory disabled.\n");
      return false;
    }
  }

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint
    = AMDGPU::getIntegerAttribute(F, "amdgpu-max-waves-per-eu", 0);
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerCU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(
    dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
           << " bytes of LDS\n"
           << "  Rounding size to " << MaxSizeWithWaveCount
           << " with a maximum occupancy of " << MaxOccupancy << '\n'
           << " and " << (LocalMemLimit - CurrentLocalMemUsage)
           << " available for promotion\n"
  );

  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      handleAlloca(*AI);
  }

  return true;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    LocalSizeY->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
    LocalSizeZ->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
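  // GEP index 1 into the i32 view is byte offset 4, which holds the packed
  // {workgroup_size_x, workgroup_size_y} halfwords; index 2 is byte offset 8,
  // holding {workgroup_size_z, reserved0}.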
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = llvm::MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  CI->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

  return CI;
}

static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  if (isa<AllocaInst>(Ptr))
    return Constant::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Returns true if Inst is an instruction we know how to rewrite when turning
// the alloca's uses into vector operations.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value.
    StoreInst *SI = cast<StoreInst>(Inst);
    return SI->getPointerOperand() == User;
  }
  default:
    return false;
  }
}

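// Sketch of the rewrite tryPromoteAllocaToVector performs for a store through
// a GEP (value names are illustrative only):
//
//   %alloca = alloca [4 x float]
//   %gep = getelementptr inbounds [4 x float], [4 x float]* %alloca,
//                                 i32 0, i32 %idx
//   store float %v, float* %gep
//
// becomes
//
//   %cast = bitcast [4 x float]* %alloca to <4 x float>*
//   %vec = load <4 x float>, <4 x float>* %cast
//   %new = insertelement <4 x float> %vec, float %v, i32 %idx
//   store <4 x float> %new, <4 x float>* %cast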
static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getNumElements() > 4) {
    DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << "  Converting alloca to vector "
        << *AllocaTy << " -> " << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Value *Ptr = Inst->getOperand(0);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Value *Ptr = Inst->getOperand(1);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       Inst->getOperand(0),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      Inst->dump();
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  // TODO: We might be able to handle some cases where the callee is a
  // constantexpr bitcast of a function.
  if (!CI->getCalledFunction())
    return false;

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if Val is the value being stored rather than the pointer
      // being stored through, since that would let the pointer escape.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS =
                 dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other icmp operand is derived
    // from the same alloca that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I)) {
    DEBUG(dbgs() << " alloca promoted to vector\n");
    return;
  }

  const Function &ContainingFunction = *I.getParent()->getParent();

  // FIXME: We should also try to get this value from the reqd_work_group_size
  // function attribute if it is available.
  unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
          << " bytes of local memory not available to promote\n");
    return;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

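  // Give each workitem in the workgroup its own copy of the alloca: an LDS
  // array with WorkGroupSize elements, indexed by the linear workitem id
  // computed below.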
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(true);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

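  // Compute the linear workitem id within the workgroup:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ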
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

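  // Rewrite the collected pointer users: plain instructions just have their
  // pointer type switched to the local address space, while calls are
  // re-created or dropped below as appropriate.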
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      // FIXME: What is this for? It doesn't make sense to promote arbitrary
      // function calls. If the call is to a defined function that can also be
      // promoted, we should be able to do this once that function is also
      // rewritten.

      std::vector<Type*> ArgTypes;
      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
                                ArgIdx != ArgEnd; ++ArgIdx) {
        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
      }
      Function *F = Call->getCalledFunction();
      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
                                                F->isVarArg());
      Constant *C = Mod->getOrInsertFunction((F->getName() + ".local").str(),
                                             NewType, F->getAttributes());
      Function *NewF = cast<Function>(C);
      Call->setCalledFunction(NewF);
      continue;
    }

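    // By this point the pointer operands of these intrinsic calls should
    // already be in the local address space: the alloca itself was replaced
    // above, and derived pointers appear earlier in WorkList than the calls
    // that use them. Re-creating each call through the IRBuilder therefore
    // selects the intrinsic overload for the new pointer type.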
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall
        = Builder.CreateCall(ObjectSize, { Src, Intr->getOperand(1) });
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->dump();
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}

FunctionPass *llvm::createAMDGPUPromoteAlloca(const TargetMachine *TM) {
  return new AMDGPUPromoteAlloca(TM);
}