//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
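//
// For example, given two LDS variables reachable from a non-kernel function
// (illustrative IR, not the pass's exact output):
//
//   @a = internal addrspace(3) global i32 undef, align 4
//   @b = internal addrspace(3) global i64 undef, align 8
//
// the pass builds
//
//   %llvm.amdgcn.module.lds.t = type { i64, i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 8
//
// and rewrites each use of @a or @b as a constant GEP to the corresponding
// field of @llvm.amdgcn.module.lds.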
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged so that later passes can simplify them or emit error
// diagnostics.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis that determines whether a
// variable is only used by a kernel is cheap and conservative, so this may
// allocate a variable in every kernel when doing so was not strictly
// necessary.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {
class AMDGPULowerModuleLDS : public ModulePass {

  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
        LocalVarsSet.insert(C);
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.
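
    // The marker emitted below looks roughly like this (illustrative; the
    // exact pointer type depends on the module):
    //
    //   call void @llvm.donothing() [ "ExplicitUse"(
    //       %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds) ]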

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    CallGraph CG(M);
    bool Changed = superAlignLDSGlobals(M);
    std::vector<GlobalVariable *> ModuleScopeVariables =
        AMDGPU::findVariablesToLower(M, nullptr);
    Changed |= processUsedLDS(CG, M, ModuleScopeVariables);

    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;
      std::vector<GlobalVariable *> KernelUsedVariables =
          AMDGPU::findVariablesToLower(M, &F);
      Changed |= processUsedLDS(CG, M, KernelUsedVariables, &F);
    }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
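  // For example, a [2 x i32] variable with align 4 is bumped to align 8 so
  // that a single b64 access can cover it (illustrative; the instruction
  // actually selected depends on the target and the access).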
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

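  // Lay out the given LDS variables as a single struct, create a replacement
  // global for that struct, and rewrite all uses of the variables as addresses
  // into it. With F == nullptr this builds the module-scope
  // llvm.amdgcn.module.lds instance; otherwise it builds a per-kernel instance
  // for F. Returns true if the module was changed.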
  bool processUsedLDS(CallGraph const &CG, Module &M,
                      std::vector<GlobalVariable *> const &LDSVarsToTransform,
                      Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    if (LDSVarsToTransform.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    for (GlobalVariable *GV : LDSVarsToTransform) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      // This usually won't need to insert any padding, in which case the
      // extra padding globals below are never allocated.
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0
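          // e.g. CurrentOffset == 6 and DataAlignV == 4 gives Padding == 2,
          // so the next field starts at offset 8.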

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    removeFromUsedLists(M, LocalVars);
    // Create alias.scope metadata and their lists. Each field in the new
    // structure does not alias any of the other fields.
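    // For a struct with fields A, B, C this creates one scope per field in a
    // shared domain; an access through field A then carries alias.scope !{A}
    // and noalias !{B, C} (illustrative names).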
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    if (LocalVars.size() > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(LocalVars.size());
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < LocalVars.size(); I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the ith variable with a constantexpr GEP to the ith
    // field of the instance that will be allocated by AMDGPUMachineFunction.
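    // e.g. a use of the variable placed at field 1 becomes (illustrative):
    //
    //   getelementptr inbounds (%llvm.amdgcn.module.lds.t,
    //       %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds,
    //       i32 0, i32 1)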
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F && !IsPaddingField[I]) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty() && !IsPaddingField[I]) {
        GV->eraseFromParent();
      }

      uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
      Align A = commonAlignment(StructAlign, Off);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      if (!IsPaddingField[I]) {
        refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
      }
    }

    for (size_t I = 0; I < LocalVars.size(); I++) {
      if (IsPaddingField[I]) {
        GlobalVariable *GV = LocalVars[I];
        assert(GV->use_empty());
        GV->eraseFromParent();
      }
    }

    // This ensures the variable is allocated when called functions access it.
    // It also lets other passes, specifically PromoteAlloca, accurately
    // calculate how much LDS will be used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      for (Function &Func : M.functions()) {
        if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
          const CallGraphNode *N = CG[&Func];
          const bool CalleesRequireModuleLDS = N->size() > 0;

          if (CalleesRequireModuleLDS) {
            // If a function this kernel might call requires module LDS,
            // annotate the kernel to let later passes know it will allocate
            // this structure, even if not apparent from the IR.
            markUsedByKernel(Builder, &Func, SGV);
          } else {
            // However if we are certain this kernel cannot call a function that
            // requires module LDS, annotate the kernel so the backend can elide
            // the allocation without repeating callgraph walks.
            Func.addFnAttr("amdgpu-elide-module-lds");
          }
        }
      }
    }
    return true;
  }

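  // Propagate the alignment known at Ptr, together with alias scope metadata,
  // to memory accesses reachable from Ptr through GEPs and casts, recursing up
  // to MaxDepth levels. For a GEP with a constant offset the refined alignment
  // is the common alignment of the base alignment and that offset.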
  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations currently works on pointers, but
        // check anyway in case that changes or we process a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}