//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;

namespace {

class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  LegacyDivergenceAnalysis *DA;
  MemoryDependenceResults *MDR;
  LoopInfo *LI;
  DenseMap<Value *, GetElementPtrInst *> noClobberClones;
  bool isEntryFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() : FunctionPass(ID) {}
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst *Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}

static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

// Collect all transitive predecessors of Root into Set.
static void DFS(BasicBlock *Root, SetVector<BasicBlock *> &Set) {
  for (BasicBlock *Pred : predecessors(Root))
    if (Set.insert(Pred))
      DFS(Pred, Set);
}

bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst *Load) {
  // 1. Get the loop containing Load->getParent().
  // 2. If the load is inside a loop, collect all blocks of the outermost
  //    enclosing loop and check them for writes; the predecessor DFS then
  //    starts from that loop's header.
  // 3. Otherwise, start the DFS over all predecessors from the load's block.
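  //
  // The load is reported as clobbered if, in any of the collected blocks,
  // MemoryDependenceAnalysis returns a clobbering or unknown dependency on
  // its pointer, or a def that may write to memory.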
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
  if (L) {
    // Walk up to the outermost loop containing the load.
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  DFS(Start, Checklist);
  for (auto &BB : Checklist) {
    BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
      BasicBlock::iterator(Load) : BB->end();
    auto Q = MDR->getPointerDependencyFrom(
        MemoryLocation::getBeforeOrAfter(Ptr), true, StartIt, BB, Load);
    if (Q.isClobber() || Q.isUnknown() ||
        // Store defines the load and thus clobbers it.
        (Q.isDef() && Q.getInst()->mayWriteToMemory()))
      return true;
  }
  return false;
}

void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (DA->isUniform(&I))
    setUniformMetadata(I.getParent()->getTerminator());
}

void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [&](LoadInst &Load) -> bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We are restricted to the function boundary (a FunctionPass cannot look
  // beyond it), so we can only prove that memory is not clobbered for loads
  // that are live in to entry functions.
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);

  if (!isEntryFunc) {
    if (PtrI)
      setUniformMetadata(PtrI);
    return;
  }

  bool NotClobbered = false;
  if (PtrI)
    NotClobbered = !isClobberedInFunction(&I);
  else if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
    if (isGlobalLoad(I) && !isClobberedInFunction(&I)) {
      NotClobbered = true;
      // Look up an existing GEP clone of this pointer.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create a zero-index GEP of the value.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
            Type::getInt64Ty(Ptr->getContext()), APInt(64, 0));
        // Insert the GEP at the entry block so it dominates all uses.
        PtrI = GetElementPtrInst::Create(
            Ptr->getType()->getPointerElementType(), Ptr,
            ArrayRef<Value *>(Idx), Twine(""),
            F->getEntryBlock().getFirstNonPHI());
        // Remember the clone so later loads of the same pointer reuse it.
        noClobberClones[Ptr] = cast<GetElementPtrInst>(PtrI);
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  isEntryFunc = AMDGPU::isEntryFunctionCC(F.getCallingConv());

  visit(F);
  noClobberClones.clear();
  return true;
}

FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}