//===-- AMDGPULowerKernelAttributes.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass attempts to make use of reqd_work_group_size metadata
/// to eliminate loads from the dispatch packet and to constant fold OpenCL
/// get_local_size-like functions.
//
//===----------------------------------------------------------------------===//
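
// As a rough illustration (not taken from this file; kernel and values are
// made up): for an OpenCL kernel carrying a required work-group size
// attribute, e.g.
//
//   __attribute__((reqd_work_group_size(64, 1, 1)))
//   __kernel void k(__global uint *out) {
//     out[get_global_id(0)] = get_local_size(0);
//   }
//
// the get_local_size(0) query can be folded to the constant 64, and the
// corresponding loads from the dispatch packet removed.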

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-attributes"

using namespace llvm;

namespace {

// Field offsets in hsa_kernel_dispatch_packet_t.
enum DispatchPacketOffsets {
  WORKGROUP_SIZE_X = 4,
  WORKGROUP_SIZE_Y = 6,
  WORKGROUP_SIZE_Z = 8,

  GRID_SIZE_X = 12,
  GRID_SIZE_Y = 16,
  GRID_SIZE_Z = 20
};

class AMDGPULowerKernelAttributes : public ModulePass {
  Module *Mod = nullptr;

public:
  static char ID;

  AMDGPULowerKernelAttributes() : ModulePass(ID) {}

  bool processUse(CallInst *CI);

  bool doInitialization(Module &M) override;
  bool runOnModule(Module &M) override;

  StringRef getPassName() const override {
    return "AMDGPU Kernel Attributes";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

bool AMDGPULowerKernelAttributes::doInitialization(Module &M) {
  Mod = &M;
  return false;
}

bool AMDGPULowerKernelAttributes::processUse(CallInst *CI) {
  Function *F = CI->getParent()->getParent();

  auto MD = F->getMetadata("reqd_work_group_size");
  const bool HasReqdWorkGroupSize = MD && MD->getNumOperands() == 3;

  const bool HasUniformWorkGroupSize =
    F->getFnAttribute("uniform-work-group-size").getValueAsString() == "true";

  if (!HasReqdWorkGroupSize && !HasUniformWorkGroupSize)
    return false;

  Value *WorkGroupSizeX = nullptr;
  Value *WorkGroupSizeY = nullptr;
  Value *WorkGroupSizeZ = nullptr;

  Value *GridSizeX = nullptr;
  Value *GridSizeY = nullptr;
  Value *GridSizeZ = nullptr;

  const DataLayout &DL = Mod->getDataLayout();

  // We expect to see several GEP users, cast to the appropriate type and
  // loaded.
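  //
  // For example (illustrative only; the value names and the addrspace(4)
  // constant address space are assumptions about typical generated code),
  // the workgroup_size_x field at offset 4 might be read as:
  //
  //   %dp  = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  //   %gep = getelementptr i8, i8 addrspace(4)* %dp, i64 4
  //   %bc  = bitcast i8 addrspace(4)* %gep to i16 addrspace(4)*
  //   %wgx = load i16, i16 addrspace(4)* %bc, align 4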
  for (User *U : CI->users()) {
    if (!U->hasOneUse())
      continue;

    int64_t Offset = 0;
    if (GetPointerBaseWithConstantOffset(U, Offset, DL) != CI)
      continue;

    auto *BCI = dyn_cast<BitCastInst>(*U->user_begin());
    if (!BCI || !BCI->hasOneUse())
      continue;

    auto *Load = dyn_cast<LoadInst>(*BCI->user_begin());
    if (!Load || !Load->isSimple())
      continue;

    unsigned LoadSize = DL.getTypeStoreSize(Load->getType());

    // TODO: Handle merged loads.
    switch (Offset) {
    case WORKGROUP_SIZE_X:
      if (LoadSize == 2)
        WorkGroupSizeX = Load;
      break;
    case WORKGROUP_SIZE_Y:
      if (LoadSize == 2)
        WorkGroupSizeY = Load;
      break;
    case WORKGROUP_SIZE_Z:
      if (LoadSize == 2)
        WorkGroupSizeZ = Load;
      break;
    case GRID_SIZE_X:
      if (LoadSize == 4)
        GridSizeX = Load;
      break;
    case GRID_SIZE_Y:
      if (LoadSize == 4)
        GridSizeY = Load;
      break;
    case GRID_SIZE_Z:
      if (LoadSize == 4)
        GridSizeZ = Load;
      break;
    default:
      break;
    }
  }

  // Pattern match the code used to handle partial workgroup dispatches in the
  // library implementation of get_local_size, so the entire function can be
  // constant folded with a known group size.
  //
  // uint r = grid_size - group_id * group_size;
  // get_local_size = (r < group_size) ? r : group_size;
  //
  // If we have uniform-work-group-size (which is the default in OpenCL 1.2),
  // the grid_size is required to be a multiple of group_size. In this case:
  //
  // grid_size - (group_id * group_size) < group_size
  // ->
  // grid_size < group_size + (group_id * group_size)
  //
  // Dividing both sides by group_size (group_size > 0, and grid_size is a
  // multiple of group_size):
  //
  // (grid_size / group_size) < 1 + group_id
  //
  // grid_size / group_size is at least 1, so we can conclude the select
  // condition is false (except for group_id == 0, where the select result is
  // the same).
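  //
  // In IR the matched pattern typically looks like this (illustrative value
  // names, shown for the X dimension):
  //
  //   %gid  = call i32 @llvm.amdgcn.workgroup.id.x()
  //   %zext = zext i16 %group.size.x to i32
  //   %mul  = mul i32 %gid, %zext
  //   %sub  = sub i32 %grid.size.x, %mul
  //   %cmp  = icmp ult i32 %sub, %zext
  //   %sel  = select i1 %cmp, i32 %sub, i32 %zext
  //
  // With uniform-work-group-size, %sel can simply be replaced by %zext; with
  // reqd_work_group_size it becomes the known constant.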

  bool MadeChange = false;
  Value *WorkGroupSizes[3] = { WorkGroupSizeX, WorkGroupSizeY, WorkGroupSizeZ };
  Value *GridSizes[3] = { GridSizeX, GridSizeY, GridSizeZ };

  for (int I = 0; HasUniformWorkGroupSize && I < 3; ++I) {
    Value *GroupSize = WorkGroupSizes[I];
    Value *GridSize = GridSizes[I];
    if (!GroupSize || !GridSize)
      continue;

    for (User *U : GroupSize->users()) {
      auto *ZextGroupSize = dyn_cast<ZExtInst>(U);
      if (!ZextGroupSize)
        continue;

      for (User *ZextUser : ZextGroupSize->users()) {
        auto *SI = dyn_cast<SelectInst>(ZextUser);
        if (!SI)
          continue;

        using namespace llvm::PatternMatch;
        auto GroupIDIntrin = I == 0 ?
          m_Intrinsic<Intrinsic::amdgcn_workgroup_id_x>() :
          (I == 1 ? m_Intrinsic<Intrinsic::amdgcn_workgroup_id_y>() :
                    m_Intrinsic<Intrinsic::amdgcn_workgroup_id_z>());

        auto SubExpr = m_Sub(m_Specific(GridSize),
                             m_Mul(GroupIDIntrin, m_Specific(ZextGroupSize)));

        ICmpInst::Predicate Pred;
        if (match(SI,
                  m_Select(m_ICmp(Pred, SubExpr, m_Specific(ZextGroupSize)),
                           SubExpr,
                           m_Specific(ZextGroupSize))) &&
            Pred == ICmpInst::ICMP_ULT) {
          if (HasReqdWorkGroupSize) {
            ConstantInt *KnownSize
              = mdconst::extract<ConstantInt>(MD->getOperand(I));
            SI->replaceAllUsesWith(ConstantExpr::getIntegerCast(KnownSize,
                                                                SI->getType(),
                                                                false));
          } else {
            SI->replaceAllUsesWith(ZextGroupSize);
          }

          MadeChange = true;
        }
      }
    }
  }

  if (!HasReqdWorkGroupSize)
    return MadeChange;
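
  // From here on reqd_work_group_size is known to be present; its metadata
  // carries three integer operands, e.g. (illustrative values):
  //
  //   !reqd_work_group_size !{i32 64, i32 1, i32 1}
  //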
  // Eliminate any other loads we can from the dispatch packet.
  for (int I = 0; I < 3; ++I) {
    Value *GroupSize = WorkGroupSizes[I];
    if (!GroupSize)
      continue;

    ConstantInt *KnownSize = mdconst::extract<ConstantInt>(MD->getOperand(I));
    GroupSize->replaceAllUsesWith(
      ConstantExpr::getIntegerCast(KnownSize,
                                   GroupSize->getType(),
                                   false));
    MadeChange = true;
  }

  return MadeChange;
}

// TODO: Move makeLIDRangeMetadata usage into here. We don't seem to get a
// TargetPassConfig for the subtarget here.
bool AMDGPULowerKernelAttributes::runOnModule(Module &M) {
  StringRef DispatchPtrName
    = Intrinsic::getName(Intrinsic::amdgcn_dispatch_ptr);

  Function *DispatchPtr = Mod->getFunction(DispatchPtrName);
  if (!DispatchPtr) // Dispatch ptr not used.
    return false;

  bool MadeChange = false;

  SmallPtrSet<Instruction *, 4> HandledUses;
  for (auto *U : DispatchPtr->users()) {
    CallInst *CI = cast<CallInst>(U);
    if (HandledUses.insert(CI).second) {
      if (processUse(CI))
        MadeChange = true;
    }
  }

  return MadeChange;
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelAttributes, DEBUG_TYPE,
                      "AMDGPU Kernel Attributes", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelAttributes, DEBUG_TYPE,
                    "AMDGPU Kernel Attributes", false, false)

char AMDGPULowerKernelAttributes::ID = 0;

ModulePass *llvm::createAMDGPULowerKernelAttributesPass() {
  return new AMDGPULowerKernelAttributes();
}
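
// Note: based on the pass argument registered above (DEBUG_TYPE), the pass can
// typically be exercised in isolation with the legacy pass manager, e.g.
// (invocation details may vary between LLVM versions and builds):
//
//   opt -amdgpu-lower-kernel-attributes -S input.ll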