//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
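//
// For example (an illustrative sketch; @a and @b are hypothetical variables,
// and the field order is chosen by the struct layout optimisation):
//
//   @a = internal addrspace(3) global i64 undef
//   @b = internal addrspace(3) global i32 undef
//
// become a single struct instance
//
//   %llvm.amdgcn.module.lds.t = type { i64, i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t undef, align 8
//
// and uses of @a and @b within non-kernel functions are rewritten to constant
// GEPs to fields 0 and 1 of @llvm.amdgcn.module.lds.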
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis that determines whether a
// variable is only used by a kernel is cheap and conservative, so this may
// allocate a variable in every kernel when that was not strictly necessary.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <tuple>
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {
class AMDGPULowerModuleLDS : public ModulePass {

  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant,
    // so remove the variables from these lists before replaceAllUsesWith.

    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
        LocalVarsSet.insert(C);
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.
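    //
    // Schematically, with the pointer type of the operand elided, the marker
    // emitted below is:
    //
    //   call void @llvm.donothing() [ "ExplicitUse"(@llvm.amdgcn.module.lds) ]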

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    LLVMContext &Ctx = M.getContext();
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    // Move variables used by functions into amdgcn.module.lds
    std::vector<GlobalVariable *> ModuleScopeVariables =
        AMDGPU::findVariablesToLower(M, nullptr);
    if (!ModuleScopeVariables.empty()) {
      std::string VarName = "llvm.amdgcn.module.lds";

      GlobalVariable *SGV;
      DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
      std::tie(SGV, LDSVarsToConstantGEP) =
          createLDSVariableReplacement(M, VarName, ModuleScopeVariables);

      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});

      removeFromUsedLists(M, ModuleScopeVariables);
      replaceLDSVariablesWithStruct(M, ModuleScopeVariables, SGV,
                                    LDSVarsToConstantGEP,
                                    [](Use &) { return true; });

      // This ensures the variable is allocated when called functions access it.
      // It also lets other passes, specifically PromoteAlloca, accurately
      // calculate how much LDS will be used by the kernel after lowering.

      IRBuilder<> Builder(Ctx);
      for (Function &Func : M.functions()) {
        if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
          const CallGraphNode *N = CG[&Func];
          const bool CalleesRequireModuleLDS = N->size() > 0;

          if (CalleesRequireModuleLDS) {
            // If a function this kernel might call requires module LDS,
            // annotate the kernel to let later passes know it will allocate
            // this structure, even if not apparent from the IR.
            markUsedByKernel(Builder, &Func, SGV);
          } else {
            // However if we are certain this kernel cannot call a function that
            // requires module LDS, annotate the kernel so the backend can elide
            // the allocation without repeating callgraph walks.
            Func.addFnAttr("amdgpu-elide-module-lds");
          }
        }
      }

      Changed = true;
    }

    // Move variables used by kernels into per-kernel instances
    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;

      std::vector<GlobalVariable *> KernelUsedVariables =
          AMDGPU::findVariablesToLower(M, &F);

      // Replace all constant uses with instructions if they belong to the
      // current kernel. This is not strictly necessary, but removing it
      // would cause test churn.
      for (size_t I = 0; I < KernelUsedVariables.size(); I++) {
        GlobalVariable *GV = KernelUsedVariables[I];
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, &F);
        }
        GV->removeDeadConstantUsers();
      }

      if (!KernelUsedVariables.empty()) {
        std::string VarName =
            (Twine("llvm.amdgcn.kernel.") + F.getName() + ".lds").str();
        GlobalVariable *SGV;
        DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
        std::tie(SGV, LDSVarsToConstantGEP) =
            createLDSVariableReplacement(M, VarName, KernelUsedVariables);

        removeFromUsedLists(M, KernelUsedVariables);
        replaceLDSVariablesWithStruct(
            M, KernelUsedVariables, SGV, LDSVarsToConstantGEP, [&F](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              return I && I->getFunction() == &F;
            });
        Changed = true;
      }
    }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  std::tuple<GlobalVariable *, DenseMap<GlobalVariable *, Constant *>>
  createLDSVariableReplacement(
      Module &M, std::string VarName,
      std::vector<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and a map from
    // each of those variables to the corresponding constant GEP into the
    // struct. Padding variables may be introduced to meet alignment
    // requirements; no aliasing metadata is useful for these as they have no
    // uses, and they are erased before returning.
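    //
    // Purely illustrative: were the layout to place an i16 at offset 0
    // followed by an i32, a [2 x i8] padding field would be inserted so the
    // i32 lands at offset 4, giving { i16, [2 x i8], i32 }.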

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    for (GlobalVariable *GV : LDSVarsToTransform) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o + (a - (o % a)) ) % a == 0
          //      (offset + Padding ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }

  template <typename PredicateTy>
  void replaceLDSVariablesWithStruct(
      Module &M, std::vector<GlobalVariable *> const &LDSVarsToTransform,
      GlobalVariable *SGV,
      DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP,
      PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Create alias.scope metadata and the corresponding noalias lists. Each
    // field in the new structure does not alias any other field.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the ith variable with a constantexpr to the
    // corresponding field of the instance that will be allocated by
    // AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = LDSVarsToConstantGEP[GV];

      GV->replaceUsesWithIf(GEP, Predicate);
      if (GV->use_empty()) {
        GV->eraseFromParent();
      }

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A = commonAlignment(SGV->getAlign().valueOrOne(), Offset);

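      // Keep NoAliasList holding every scope except the current field's: it
      // starts as all scopes but the first, and writing the previous field's
      // scope over slot I - 1 (which held scope I) restores that invariant
      // for iteration I.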
      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }

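  // Walk the users of Ptr up to MaxDepth, raising the alignment of loads,
  // stores and atomics addressed through it, attaching alias scope metadata
  // to memory-accessing users, and recursing through constant-offset GEPs
  // and address space casts.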
  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // No atomicrmw operation currently works on pointers, but check
        // anyway in case that changes or we encounter one via a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}