//===-- AMDGPUAlwaysInlinePass.cpp - Inline all functions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass marks functions for forced inlining: every non-kernel function
/// that transitively uses an LDS global is marked always_inline (along with
/// its callers), and when function calls are not enabled, all other used
/// function definitions are marked always_inline as well (or noinline when
/// stressing function calls).
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Cloning.h"

using namespace llvm;

namespace {

static cl::opt<bool> StressCalls(
  "amdgpu-stress-function-calls",
  cl::Hidden,
  cl::desc("Force all functions to be noinline"),
  cl::init(false));

class AMDGPUAlwaysInline : public ModulePass {
  bool GlobalOpt;

public:
  static char ID;

  AMDGPUAlwaysInline(bool GlobalOpt = false) :
    ModulePass(ID), GlobalOpt(GlobalOpt) { }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // End anonymous namespace

INITIALIZE_PASS(AMDGPUAlwaysInline, "amdgpu-always-inline",
                "AMDGPU Inline All Functions", false, false)

char AMDGPUAlwaysInline::ID = 0;

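// Mark every non-entry function that transitively uses \p GV for forced
// inlining: drop noinline, add the function to \p FuncsToAlwaysInline, and
// keep walking its users so that its callers are inlined as well.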
static void
recursivelyVisitUsers(GlobalValue &GV,
                      SmallPtrSetImpl<Function *> &FuncsToAlwaysInline) {
  SmallVector<User *, 16> Stack;

  SmallPtrSet<const Value *, 8> Visited;

  for (User *U : GV.users())
    Stack.push_back(U);

  while (!Stack.empty()) {
    User *U = Stack.pop_back_val();
    if (!Visited.insert(U).second)
      continue;

    if (Instruction *I = dyn_cast<Instruction>(U)) {
      Function *F = I->getParent()->getParent();
      if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
        // FIXME: This is a horrible hack. We should always respect noinline,
        // and simply let the error surface when we can't handle this.
        //
        // Unfortunately, clang adds noinline to all functions at -O0. We have
        // to override that here until it is fixed.
        F->removeFnAttr(Attribute::NoInline);

        FuncsToAlwaysInline.insert(F);
        Stack.push_back(F);
      }

      // No need to look at further users, but we do need to inline any callers.
      continue;
    }

    for (User *UU : U->users())
      Stack.push_back(UU);
  }
}

static bool alwaysInlineImpl(Module &M, bool GlobalOpt) {
  std::vector<GlobalAlias *> AliasesToRemove;

  SmallPtrSet<Function *, 8> FuncsToAlwaysInline;
  SmallPtrSet<Function *, 8> FuncsToNoInline;

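  // Inlining doesn't look through aliases to functions, so redirect any uses
  // of a function alias to the aliased function itself.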
  for (GlobalAlias &A : M.aliases()) {
    if (Function *F = dyn_cast<Function>(A.getAliasee())) {
      A.replaceAllUsesWith(F);
      AliasesToRemove.push_back(&A);
    }

    // FIXME: If the aliasee isn't a function, it's some kind of constant expr
    // cast that won't be inlined through.
  }

  if (GlobalOpt) {
    for (GlobalAlias *A : AliasesToRemove) {
      A->eraseFromParent();
    }
  }

  // Always force inlining of any function that uses an LDS global address. This
  // is something of a workaround because we don't have a way of supporting LDS
  // objects defined in functions. LDS is always allocated by a kernel, and it
  // is difficult to manage LDS usage if a function may be used by multiple
  // kernels.
  //
  // OpenCL doesn't allow declaring LDS in non-kernels, so in practice this
  // should only appear when IPO passes manage to move LDS defined in a kernel
  // into a single user function.
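  //
  // For example (the names here are purely illustrative), given an LDS global
  //   @lds = internal addrspace(3) global [64 x i32] undef
  // referenced from a non-kernel function @helper, both @helper and every
  // non-kernel function that transitively calls it get marked always_inline.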

  for (GlobalVariable &GV : M.globals()) {
    // TODO: Region address
    unsigned AS = GV.getAddressSpace();
    if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS)
      continue;

    recursivelyVisitUsers(GV, FuncsToAlwaysInline);
  }

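  // When function calls are not enabled, every used function definition has to
  // be inlined, so mark them all always_inline. Under
  // -amdgpu-stress-function-calls the logic is inverted: functions are marked
  // noinline instead, except for those that must be inlined because of LDS
  // uses.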
  if (!AMDGPUTargetMachine::EnableFunctionCalls || StressCalls) {
    auto IncompatAttr
      = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;

    for (Function &F : M) {
      if (!F.isDeclaration() && !F.use_empty() &&
          !F.hasFnAttribute(IncompatAttr)) {
        if (StressCalls) {
          if (!FuncsToAlwaysInline.count(&F))
            FuncsToNoInline.insert(&F);
        } else
          FuncsToAlwaysInline.insert(&F);
      }
    }
  }

  for (Function *F : FuncsToAlwaysInline)
    F->addFnAttr(Attribute::AlwaysInline);

  for (Function *F : FuncsToNoInline)
    F->addFnAttr(Attribute::NoInline);

  return !FuncsToAlwaysInline.empty() || !FuncsToNoInline.empty();
}

bool AMDGPUAlwaysInline::runOnModule(Module &M) {
  return alwaysInlineImpl(M, GlobalOpt);
}

ModulePass *llvm::createAMDGPUAlwaysInlinePass(bool GlobalOpt) {
  return new AMDGPUAlwaysInline(GlobalOpt);
}

PreservedAnalyses AMDGPUAlwaysInlinePass::run(Module &M,
                                              ModuleAnalysisManager &AM) {
  alwaysInlineImpl(M, GlobalOpt);
  return PreservedAnalyses::all();
}