1 //===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This pass adds amdgpu.uniform metadata to IR values so this information
11 /// can be used during instruction selection.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPU.h"
16 #include "Utils/AMDGPUBaseInfo.h"
17 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
18 #include "llvm/Analysis/MemorySSA.h"
19 #include "llvm/IR/InstVisitor.h"
20 #include "llvm/InitializePasses.h"
21 
22 #define DEBUG_TYPE "amdgpu-annotate-uniform"
23 
24 using namespace llvm;
25 
26 namespace {
27 
/// Legacy function pass that attaches "amdgpu.uniform" (and
/// "amdgpu.noclobber") metadata to IR, driven by the legacy divergence
/// analysis and MemorySSA, for later use during instruction selection.
class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  LegacyDivergenceAnalysis *DA; // Uniformity info for the current function.
  MemorySSA *MSSA;              // Used to prove a load is not clobbered.
  // Zero-index GEP clones made for loads through bare Argument/GlobalValue
  // pointers; cleared at the end of each runOnFunction.
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  bool isEntryFunc; // True when the function is an entry-point (kernel) CC.

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<MemorySSAWrapperPass>();
    // Pure annotation pass: it only adds metadata, so all analyses survive.
    AU.setPreservesAll();
 }

  // InstVisitor hooks: only branches and loads are of interest.
  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  /// Returns true if the memory read by \p Load may be written somewhere in
  /// this function, per MemorySSA's clobber walker.
  bool isClobberedInFunction(LoadInst * Load);
};
54 
55 } // End anonymous namespace
56 
// Register the pass and its analysis dependencies with the legacy pass
// manager under the "amdgpu-annotate-uniform" name.
INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;
65 
66 static void setUniformMetadata(Instruction *I) {
67   I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
68 }
69 static void setNoClobberMetadata(Instruction *I) {
70   I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
71 }
72 
73 bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
74   const MemoryAccess *MA = MSSA->getWalker()->getClobberingMemoryAccess(Load);
75   return !MSSA->isLiveOnEntryDef(MA);
76 }
77 
78 void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
79   if (DA->isUniform(&I))
80     setUniformMetadata(&I);
81 }
82 
83 void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
84   Value *Ptr = I.getPointerOperand();
85   if (!DA->isUniform(Ptr))
86     return;
87   // We're tracking up to the Function boundaries, and cannot go beyond because
88   // of FunctionPass restrictions. We can ensure that is memory not clobbered
89   // for memory operations that are live in to entry points only.
90   Instruction *PtrI = dyn_cast<Instruction>(Ptr);
91 
92   if (!isEntryFunc) {
93     if (PtrI)
94       setUniformMetadata(PtrI);
95     return;
96   }
97 
98   bool NotClobbered = false;
99   bool GlobalLoad = I.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
100   if (PtrI)
101     NotClobbered = GlobalLoad && !isClobberedInFunction(&I);
102   else if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
103     if (GlobalLoad && !isClobberedInFunction(&I)) {
104       NotClobbered = true;
105       // Lookup for the existing GEP
106       if (noClobberClones.count(Ptr)) {
107         PtrI = noClobberClones[Ptr];
108       } else {
109         // Create GEP of the Value
110         Function *F = I.getParent()->getParent();
111         Value *Idx = Constant::getIntegerValue(
112           Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
113         // Insert GEP at the entry to make it dominate all uses
114         PtrI = GetElementPtrInst::Create(I.getType(), Ptr,
115                                          ArrayRef<Value *>(Idx), Twine(""),
116                                          F->getEntryBlock().getFirstNonPHI());
117       }
118       I.replaceUsesOfWith(Ptr, PtrI);
119     }
120   }
121 
122   if (PtrI) {
123     setUniformMetadata(PtrI);
124     if (NotClobbered)
125       setNoClobberMetadata(PtrI);
126   }
127 }
128 
// Nothing to set up at module scope; the module is never modified here.
bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}
132 
133 bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
134   if (skipFunction(F))
135     return false;
136 
137   DA = &getAnalysis<LegacyDivergenceAnalysis>();
138   MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
139   isEntryFunc = AMDGPU::isEntryFunctionCC(F.getCallingConv());
140 
141   visit(F);
142   noClobberClones.clear();
143   return true;
144 }
145 
146 FunctionPass *
147 llvm::createAMDGPUAnnotateUniformValues() {
148   return new AMDGPUAnnotateUniformValues();
149 }
150