1 //===-- NVPTXTargetMachine.cpp - Define TargetMachine for NVPTX -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Top-level implementation for the NVPTX target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "NVPTXTargetMachine.h"
15 #include "NVPTX.h"
16 #include "NVPTXAllocaHoisting.h"
17 #include "NVPTXLowerAggrCopies.h"
18 #include "NVPTXTargetObjectFile.h"
19 #include "NVPTXTargetTransformInfo.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/CodeGen/Passes.h"
24 #include "llvm/CodeGen/TargetPassConfig.h"
25 #include "llvm/IR/LegacyPassManager.h"
26 #include "llvm/Pass.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/TargetRegistry.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
32 #include "llvm/Transforms/Scalar.h"
33 #include "llvm/Transforms/Scalar/GVN.h"
34 #include "llvm/Transforms/Vectorize.h"
35 #include <cassert>
36 #include <string>
37 
38 using namespace llvm;
39 
// Command-line escape hatches for NVPTX-specific behavior. All three default
// to off/false and are hidden from the normal -help output.

// LSV is still relatively new; this switch lets us turn it off in case we
// encounter (or suspect) a bug.
static cl::opt<bool>
    DisableLoadStoreVectorizer("disable-nvptx-load-store-vectorizer",
                               cl::desc("Disable load/store vectorizer"),
                               cl::init(false), cl::Hidden);

// TODO: Remove this flag when we are confident with no regressions.
static cl::opt<bool> DisableRequireStructuredCFG(
    "disable-nvptx-require-structured-cfg",
    cl::desc("Transitional flag to turn off NVPTX's requirement on preserving "
             "structured CFG. The requirement should be disabled only when "
             "unexpected regressions happen."),
    cl::init(false), cl::Hidden);

// When set, the data layout gives const(p4)/local(p5)/shared(p3) address
// spaces 32-bit pointers even on the 64-bit target (see computeDataLayout).
static cl::opt<bool> UseShortPointersOpt(
    "nvptx-short-ptr",
    cl::desc(
        "Use 32-bit pointers for accessing const/local/shared address spaces."),
    cl::init(false), cl::Hidden);
60 
namespace llvm {

// Forward declarations of the pass initializers called from
// LLVMInitializeNVPTXTarget() below; the definitions live in the
// corresponding per-pass .cpp files.
void initializeNVVMIntrRangePass(PassRegistry&);
void initializeNVVMReflectPass(PassRegistry&);
void initializeGenericToNVVMPass(PassRegistry&);
void initializeNVPTXAllocaHoistingPass(PassRegistry &);
void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
void initializeNVPTXLowerArgsPass(PassRegistry &);
void initializeNVPTXLowerAllocaPass(PassRegistry &);

} // end namespace llvm
73 
74 extern "C" void LLVMInitializeNVPTXTarget() {
75   // Register the target.
76   RegisterTargetMachine<NVPTXTargetMachine32> X(getTheNVPTXTarget32());
77   RegisterTargetMachine<NVPTXTargetMachine64> Y(getTheNVPTXTarget64());
78 
79   // FIXME: This pass is really intended to be invoked during IR optimization,
80   // but it's very NVPTX-specific.
81   PassRegistry &PR = *PassRegistry::getPassRegistry();
82   initializeNVVMReflectPass(PR);
83   initializeNVVMIntrRangePass(PR);
84   initializeGenericToNVVMPass(PR);
85   initializeNVPTXAllocaHoistingPass(PR);
86   initializeNVPTXAssignValidGlobalNamesPass(PR);
87   initializeNVPTXLowerArgsPass(PR);
88   initializeNVPTXLowerAllocaPass(PR);
89   initializeNVPTXLowerAggrCopiesPass(PR);
90 }
91 
92 static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
93   std::string Ret = "e";
94 
95   if (!is64Bit)
96     Ret += "-p:32:32";
97   else if (UseShortPointers)
98     Ret += "-p3:32:32-p4:32:32-p5:32:32";
99 
100   Ret += "-i64:64-i128:128-v16:16-v32:32-n16:32:64";
101 
102   return Ret;
103 }
104 
/// Common constructor shared by the 32- and 64-bit target machine variants.
/// \p is64bit selects the pointer width/data layout and is supplied by the
/// NVPTXTargetMachine32/64 subclass constructors. The \p RM argument is
/// intentionally ignored (see the comment on the initializer list).
NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool is64bit)
    // The pic relocation model is used regardless of what the client has
    // specified, as it is the only relocation model currently supported.
    : LLVMTargetMachine(T, computeDataLayout(is64bit, UseShortPointersOpt), TT,
                        CPU, FS, Options, Reloc::PIC_,
                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
      is64bit(is64bit), UseShortPointers(UseShortPointersOpt),
      TLOF(llvm::make_unique<NVPTXTargetObjectFile>()),
      Subtarget(TT, CPU, FS, *this) {
  // Select the driver interface (CUDA vs. NVCL/OpenCL) from the triple's OS.
  if (TT.getOS() == Triple::NVCL)
    drvInterface = NVPTX::NVCL;
  else
    drvInterface = NVPTX::CUDA;
  // Structured-CFG preservation is required by default; the cl::opt is a
  // transitional escape hatch only.
  if (!DisableRequireStructuredCFG)
    setRequiresStructuredCFG(true);
  initAsmInfo();
}
127 
128 NVPTXTargetMachine::~NVPTXTargetMachine() = default;
129 
// Out-of-line anchor to pin this class's vtable to one translation unit.
void NVPTXTargetMachine32::anchor() {}

/// 32-bit variant: forwards to the common constructor with is64bit = false.
/// The JIT flag is accepted for interface parity but unused here.
NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
139 
// Out-of-line anchor to pin this class's vtable to one translation unit.
void NVPTXTargetMachine64::anchor() {}

/// 64-bit variant: forwards to the common constructor with is64bit = true.
/// The JIT flag is accepted for interface parity but unused here.
NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
149 
namespace {

/// Pass-pipeline configuration for NVPTX code generation. Overrides the
/// generic TargetPassConfig hooks to install NVPTX-specific IR passes and to
/// suppress register allocation (NVPTX keeps virtual registers; see the
/// *RegAlloc overrides below).
class NVPTXPassConfig : public TargetPassConfig {
public:
  NVPTXPassConfig(NVPTXTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  NVPTXTargetMachine &getNVPTXTargetMachine() const {
    return getTM<NVPTXTargetMachine>();
  }

  // IR-level pipeline plus instruction selection.
  void addIRPasses() override;
  bool addInstSelector() override;
  // Post-"RA" cleanup (prolog/epilog emulation and peephole).
  void addPostRegAlloc() override;
  void addMachineSSAOptimization() override;

  // NVPTX performs no register allocation; these overrides remove it.
  FunctionPass *createTargetRegisterAllocator(bool) override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;

private:
  // If the opt level is aggressive, add GVN; otherwise, add EarlyCSE. This
  // function is only called in opt mode.
  void addEarlyCSEOrGVNPass();

  // Add passes that propagate special memory spaces.
  void addAddressSpaceInferencePasses();

  // Add passes that perform straight-line scalar optimizations.
  void addStraightLineScalarOptimizationPasses();
};

} // end anonymous namespace
183 
/// Hand the codegen driver our NVPTX-specific pass configuration.
TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new NVPTXPassConfig(*this, PM);
}
187 
/// Inject NVVMReflect and NVVMIntrRange as early as possible into the
/// optimizer pipeline built by PassManagerBuilder.
/// NOTE(review): the extension lambda captures `this` (via [&]) and reads
/// Subtarget when the pipeline is populated — assumes the TargetMachine
/// outlives the PassManagerBuilder's pipeline construction; confirm callers.
void NVPTXTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      PM.add(createNVVMReflectPass(Subtarget.getSmVersion()));
      PM.add(createNVVMIntrRangePass(Subtarget.getSmVersion()));
    });
}
196 
/// Produce the NVPTX cost-model (TTI) implementation for function \p F.
TargetTransformInfo
NVPTXTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(NVPTXTTIImpl(this, F));
}
201 
202 void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
203   if (getOptLevel() == CodeGenOpt::Aggressive)
204     addPass(createGVNPass());
205   else
206     addPass(createEarlyCSEPass());
207 }
208 
/// Add the passes that propagate NVPTX memory spaces onto generic pointers.
/// Order matters: SROA and NVPTXLowerAlloca expose the address-space facts
/// that InferAddressSpaces then propagates.
void NVPTXPassConfig::addAddressSpaceInferencePasses() {
  // NVPTXLowerArgs emits alloca for byval parameters which can often
  // be eliminated by SROA.
  addPass(createSROAPass());
  addPass(createNVPTXLowerAllocaPass());
  addPass(createInferAddressSpacesPass());
}
216 
/// Add the straight-line scalar optimization pipeline (GEP splitting,
/// speculation, SLSR, NaryReassociate) with CSE passes interleaved to clean
/// up the common subexpressions each stage exposes. Order is deliberate.
void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunites for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
  // EarlyCSE can reuse. GVN generates significantly better code than EarlyCSE
  // for some of our benchmarks.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}
233 
/// Build the IR-level portion of the NVPTX codegen pipeline: disable the
/// machine passes that assume physical registers, run the NVPTX lowering
/// passes (reflect, image opt, name fixup, generic-to-NVVM, arg lowering),
/// then — at -O1 and above — address-space inference, vectorization, and
/// scalar cleanup around the generic IR passes.
void NVPTXPassConfig::addIRPasses() {
  // The following passes are known to not play well with virtual regs hanging
  // around after register allocation (which in our case, is *all* registers).
  // We explicitly disable them here.  We do, however, need some functionality
  // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
  // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
  disablePass(&PrologEpilogCodeInserterID);
  disablePass(&MachineCopyPropagationID);
  disablePass(&TailDuplicateID);
  disablePass(&StackMapLivenessID);
  disablePass(&LiveDebugValuesID);
  disablePass(&PostRAMachineSinkingID);
  disablePass(&PostRASchedulerID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);
  disablePass(&ShrinkWrapID);

  // NVVMReflectPass is added in addEarlyAsPossiblePasses, so hopefully running
  // it here does nothing.  But since we need it for correctness when lowering
  // to NVPTX, run it here too, in case whoever built our pass pipeline didn't
  // call addEarlyAsPossiblePasses.
  const NVPTXSubtarget &ST = *getTM<NVPTXTargetMachine>().getSubtargetImpl();
  addPass(createNVVMReflectPass(ST.getSmVersion()));

  if (getOptLevel() != CodeGenOpt::None)
    addPass(createNVPTXImageOptimizerPass());
  addPass(createNVPTXAssignValidGlobalNamesPass());
  addPass(createGenericToNVVMPass());

  // NVPTXLowerArgs is required for correctness and should be run right
  // before the address space inference passes.
  addPass(createNVPTXLowerArgsPass(&getNVPTXTargetMachine()));
  if (getOptLevel() != CodeGenOpt::None) {
    addAddressSpaceInferencePasses();
    // The vectorizer runs after address-space inference so it sees the final
    // address spaces; the cl::opt lets users disable it if it misbehaves.
    if (!DisableLoadStoreVectorizer)
      addPass(createLoadStoreVectorizerPass());
    addStraightLineScalarOptimizationPasses();
  }

  // === LSR and other generic IR passes ===
  TargetPassConfig::addIRPasses();
  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}
290 
/// Add the NVPTX instruction selector, preceded by the lowerings ISel
/// depends on and followed by image-handle replacement on subtargets that
/// lack native handle support. Returns false: ISel was added successfully.
bool NVPTXPassConfig::addInstSelector() {
  const NVPTXSubtarget &ST = *getTM<NVPTXTargetMachine>().getSubtargetImpl();

  addPass(createLowerAggrCopies());
  addPass(createAllocaHoisting());
  addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));

  // Subtargets without image handles need the references rewritten after ISel.
  if (!ST.hasImageHandles())
    addPass(createNVPTXReplaceImageHandlesPass());

  return false;
}
303 
304 void NVPTXPassConfig::addPostRegAlloc() {
305   addPass(createNVPTXPrologEpilogPass(), false);
306   if (getOptLevel() != CodeGenOpt::None) {
307     // NVPTXPrologEpilogPass calculates frame object offset and replace frame
308     // index with VRFrame register. NVPTXPeephole need to be run after that and
309     // will replace VRFrame with VRFrameLocal when possible.
310     addPass(createNVPTXPeephole());
311   }
312 }
313 
/// NVPTX keeps virtual registers all the way to emission, so no register
/// allocator is ever created (the nullptr is asserted on in the
/// add*RegAlloc overrides below).
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
  return nullptr; // No reg alloc
}
317 
/// Fast-path replacement for register allocation: only the SSA-deconstruction
/// passes (PHI elimination, then two-address conversion) are still needed.
void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");
  addPass(&PHIEliminationID);
  addPass(&TwoAddressInstructionPassID);
}
323 
/// Optimized-path replacement for register allocation. Runs the usual
/// pre-regalloc analyses and transforms (liveness, PHI elimination,
/// two-address, coalescing, scheduling, stack-slot coloring) but never an
/// actual allocator, since NVPTX emits virtual registers directly.
void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");

  addPass(&ProcessImplicitDefsID);
  addPass(&LiveVariablesID);
  addPass(&MachineLoopInfoID);
  addPass(&PHIEliminationID);

  addPass(&TwoAddressInstructionPassID);
  addPass(&RegisterCoalescerID);

  // PreRA instruction scheduling.
  if (addPass(&MachineSchedulerID))
    printAndVerify("After Machine Scheduling");


  addPass(&StackSlotColoringID);

  // FIXME: Needs physical registers
  //addPass(&MachineLICMID);

  printAndVerify("After StackSlotColoring");
}
347 
/// Machine-SSA optimization pipeline for NVPTX.
/// NOTE(review): this appears to mirror the generic
/// TargetPassConfig::addMachineSSAOptimization sequence (presumably copied
/// so NVPTX controls exactly which passes run) — confirm it is kept in sync
/// with the generic version when upgrading LLVM.
void NVPTXPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  if (addPass(&EarlyTailDuplicateID))
    printAndVerify("After Pre-RegAlloc TailDuplicate");

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);
  printAndVerify("After codegen DCE pass");

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  if (addILPOpts())
    printAndVerify("After ILP optimizations");

  addPass(&EarlyMachineLICMID);
  addPass(&MachineCSEID);

  addPass(&MachineSinkingID);
  printAndVerify("After Machine LICM, CSE and Sinking passes");

  addPass(&PeepholeOptimizerID);
  printAndVerify("After codegen peephole optimization pass");
}
387