1 //===-- NVPTXTargetMachine.cpp - Define TargetMachine for NVPTX -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Top-level implementation for the NVPTX target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "NVPTXTargetMachine.h"
15 #include "MCTargetDesc/NVPTXMCAsmInfo.h"
16 #include "NVPTX.h"
17 #include "NVPTXAllocaHoisting.h"
18 #include "NVPTXLowerAggrCopies.h"
19 #include "NVPTXTargetObjectFile.h"
20 #include "NVPTXTargetTransformInfo.h"
21 #include "llvm/Analysis/Passes.h"
22 #include "llvm/CodeGen/AsmPrinter.h"
23 #include "llvm/CodeGen/MachineModuleInfo.h"
24 #include "llvm/CodeGen/Passes.h"
25 #include "llvm/CodeGen/TargetPassConfig.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/IRPrintingPasses.h"
28 #include "llvm/IR/LegacyPassManager.h"
29 #include "llvm/IR/Verifier.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCInstrInfo.h"
32 #include "llvm/MC/MCStreamer.h"
33 #include "llvm/MC/MCSubtargetInfo.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/FormattedStream.h"
37 #include "llvm/Support/TargetRegistry.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Target/TargetInstrInfo.h"
40 #include "llvm/Target/TargetLowering.h"
41 #include "llvm/Target/TargetLoweringObjectFile.h"
42 #include "llvm/Target/TargetMachine.h"
43 #include "llvm/Target/TargetOptions.h"
44 #include "llvm/Target/TargetRegisterInfo.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 #include "llvm/Transforms/Scalar.h"
47 #include "llvm/Transforms/Scalar/GVN.h"
48 #include "llvm/Transforms/Vectorize.h"
49 
50 using namespace llvm;
51 
52 static cl::opt<bool> UseInferAddressSpaces(
53     "nvptx-use-infer-addrspace", cl::init(true), cl::Hidden,
54     cl::desc("Optimize address spaces using NVPTXInferAddressSpaces instead of "
55              "NVPTXFavorNonGenericAddrSpaces"));
56 
57 // LSV is still relatively new; this switch lets us turn it off in case we
58 // encounter (or suspect) a bug.
59 static cl::opt<bool>
60     DisableLoadStoreVectorizer("disable-nvptx-load-store-vectorizer",
61                                cl::desc("Disable load/store vectorizer"),
62                                cl::init(false), cl::Hidden);
63 
64 namespace llvm {
65 void initializeNVVMIntrRangePass(PassRegistry&);
66 void initializeNVVMReflectPass(PassRegistry&);
67 void initializeGenericToNVVMPass(PassRegistry&);
68 void initializeNVPTXAllocaHoistingPass(PassRegistry &);
69 void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
70 void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
71 void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
72 void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
73 void initializeNVPTXLowerArgsPass(PassRegistry &);
74 void initializeNVPTXLowerAllocaPass(PassRegistry &);
75 }
76 
77 extern "C" void LLVMInitializeNVPTXTarget() {
78   // Register the target.
79   RegisterTargetMachine<NVPTXTargetMachine32> X(getTheNVPTXTarget32());
80   RegisterTargetMachine<NVPTXTargetMachine64> Y(getTheNVPTXTarget64());
81 
82   // FIXME: This pass is really intended to be invoked during IR optimization,
83   // but it's very NVPTX-specific.
84   PassRegistry &PR = *PassRegistry::getPassRegistry();
85   initializeNVVMReflectPass(PR);
86   initializeNVVMIntrRangePass(PR);
87   initializeGenericToNVVMPass(PR);
88   initializeNVPTXAllocaHoistingPass(PR);
89   initializeNVPTXAssignValidGlobalNamesPass(PR);
90   initializeNVPTXFavorNonGenericAddrSpacesPass(PR);
91   initializeNVPTXInferAddressSpacesPass(PR);
92   initializeNVPTXLowerArgsPass(PR);
93   initializeNVPTXLowerAllocaPass(PR);
94   initializeNVPTXLowerAggrCopiesPass(PR);
95 }
96 
// Builds the NVPTX data-layout string.  Little-endian; 32-bit targets carry
// an explicit 32-bit pointer spec, and both widths share the integer/vector
// alignment and native-width suffix.
static std::string computeDataLayout(bool is64Bit) {
  std::string Layout = is64Bit ? "e" : "e-p:32:32";
  Layout += "-i64:64-v16:16-v32:32-n16:32:64";
  return Layout;
}
107 
108 NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
109                                        StringRef CPU, StringRef FS,
110                                        const TargetOptions &Options,
111                                        Optional<Reloc::Model> RM,
112                                        CodeModel::Model CM,
113                                        CodeGenOpt::Level OL, bool is64bit)
114     // The pic relocation model is used regardless of what the client has
115     // specified, as it is the only relocation model currently supported.
116     : LLVMTargetMachine(T, computeDataLayout(is64bit), TT, CPU, FS, Options,
117                         Reloc::PIC_, CM, OL),
118       is64bit(is64bit),
119       TLOF(make_unique<NVPTXTargetObjectFile>()),
120       Subtarget(TT, CPU, FS, *this) {
121   if (TT.getOS() == Triple::NVCL)
122     drvInterface = NVPTX::NVCL;
123   else
124     drvInterface = NVPTX::CUDA;
125   initAsmInfo();
126 }
127 
128 NVPTXTargetMachine::~NVPTXTargetMachine() {}
129 
// Out-of-line virtual method to serve as the vtable anchor for this class.
void NVPTXTargetMachine32::anchor() {}
131 
// 32-bit-pointer variant: delegates to the common NVPTXTargetMachine
// constructor with is64bit = false.
NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
139 
// Out-of-line virtual method to serve as the vtable anchor for this class.
void NVPTXTargetMachine64::anchor() {}
141 
// 64-bit-pointer variant: delegates to the common NVPTXTargetMachine
// constructor with is64bit = true.
NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
149 
namespace {
// Pass-pipeline configuration for the NVPTX backend.  NVPTX performs no
// register allocation (createTargetRegisterAllocator returns null below),
// so several standard codegen hooks are overridden or replaced.
class NVPTXPassConfig : public TargetPassConfig {
public:
  NVPTXPassConfig(NVPTXTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  // Convenience accessor for the concrete NVPTX target machine.
  NVPTXTargetMachine &getNVPTXTargetMachine() const {
    return getTM<NVPTXTargetMachine>();
  }

  void addIRPasses() override;
  bool addInstSelector() override;
  void addPostRegAlloc() override;
  void addMachineSSAOptimization() override;

  // Register-allocation hooks: NVPTX supplies no allocator and schedules
  // only the lowering passes it still needs (PHI elimination, two-address).
  FunctionPass *createTargetRegisterAllocator(bool) override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;

private:
  // If the opt level is aggressive, add GVN; otherwise, add EarlyCSE. This
  // function is only called in opt mode.
  void addEarlyCSEOrGVNPass();

  // Add passes that propagate special memory spaces.
  void addAddressSpaceInferencePasses();

  // Add passes that perform straight-line scalar optimizations.
  void addStraightLineScalarOptimizationPasses();
};
} // end anonymous namespace
181 
182 TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
183   return new NVPTXPassConfig(this, PM);
184 }
185 
// Passes that should run as early as possible in the IR optimization
// pipeline: NVVM reflection lowering, then intrinsic range annotation
// parameterized by the subtarget's SM version.
void NVPTXTargetMachine::addEarlyAsPossiblePasses(PassManagerBase &PM) {
  PM.add(createNVVMReflectPass());
  PM.add(createNVVMIntrRangePass(Subtarget.getSmVersion()));
}
190 
191 TargetIRAnalysis NVPTXTargetMachine::getTargetIRAnalysis() {
192   return TargetIRAnalysis([this](const Function &F) {
193     return TargetTransformInfo(NVPTXTTIImpl(this, F));
194   });
195 }
196 
197 void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
198   if (getOptLevel() == CodeGenOpt::Aggressive)
199     addPass(createGVNPass());
200   else
201     addPass(createEarlyCSEPass());
202 }
203 
204 void NVPTXPassConfig::addAddressSpaceInferencePasses() {
205   // NVPTXLowerArgs emits alloca for byval parameters which can often
206   // be eliminated by SROA.
207   addPass(createSROAPass());
208   addPass(createNVPTXLowerAllocaPass());
209   if (UseInferAddressSpaces) {
210     addPass(createNVPTXInferAddressSpacesPass());
211   } else {
212     addPass(createNVPTXFavorNonGenericAddrSpacesPass());
213     // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leave
214     // them unused. We could remove dead code in an ad-hoc manner, but that
215     // requires manual work and might be error-prone.
216     addPass(createDeadCodeEliminationPass());
217   }
218 }
219 
void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse. GVN generates significantly better code than EarlyCSE
  // for some of our benchmarks.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}
236 
void NVPTXPassConfig::addIRPasses() {
  // The following passes are known to not play well with virtual regs hanging
  // around after register allocation (which in our case, is *all* registers).
  // We explicitly disable them here.  We do, however, need some functionality
  // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
  // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
  disablePass(&PrologEpilogCodeInserterID);
  disablePass(&MachineCopyPropagationID);
  disablePass(&TailDuplicateID);
  disablePass(&StackMapLivenessID);
  disablePass(&LiveDebugValuesID);
  disablePass(&PostRASchedulerID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  // NVVMReflectPass is added in addEarlyAsPossiblePasses, so hopefully running
  // it here does nothing.  But since we need it for correctness when lowering
  // to NVPTX, run it here too, in case whoever built our pass pipeline didn't
  // call addEarlyAsPossiblePasses.
  addPass(createNVVMReflectPass());

  // Image optimization is only run when optimizing.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createNVPTXImageOptimizerPass());
  addPass(createNVPTXAssignValidGlobalNamesPass());
  addPass(createGenericToNVVMPass());

  // NVPTXLowerArgs is required for correctness and should be run right
  // before the address space inference passes.
  addPass(createNVPTXLowerArgsPass(&getNVPTXTargetMachine()));
  if (getOptLevel() != CodeGenOpt::None) {
    addAddressSpaceInferencePasses();
    // Vectorize loads/stores unless explicitly disabled on the command line.
    if (!DisableLoadStoreVectorizer)
      addPass(createLoadStoreVectorizerPass());
    addStraightLineScalarOptimizationPasses();
  }

  // === LSR and other generic IR passes ===
  TargetPassConfig::addIRPasses();
  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}
290 
291 bool NVPTXPassConfig::addInstSelector() {
292   const NVPTXSubtarget &ST = *getTM<NVPTXTargetMachine>().getSubtargetImpl();
293 
294   addPass(createLowerAggrCopies());
295   addPass(createAllocaHoisting());
296   addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));
297 
298   if (!ST.hasImageHandles())
299     addPass(createNVPTXReplaceImageHandlesPass());
300 
301   return false;
302 }
303 
304 void NVPTXPassConfig::addPostRegAlloc() {
305   addPass(createNVPTXPrologEpilogPass(), false);
306   if (getOptLevel() != CodeGenOpt::None) {
307     // NVPTXPrologEpilogPass calculates frame object offset and replace frame
308     // index with VRFrame register. NVPTXPeephole need to be run after that and
309     // will replace VRFrame with VRFrameLocal when possible.
310     addPass(createNVPTXPeephole());
311   }
312 }
313 
// NVPTX supplies no register allocator; returning null here (together with
// the asserts in addFastRegAlloc/addOptimizedRegAlloc below) keeps register
// allocation out of the pipeline entirely.
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
  return nullptr; // No reg alloc
}
317 
// Fast-regalloc slot: no allocator is ever passed in (see
// createTargetRegisterAllocator).  Only PHI elimination and two-address
// lowering are scheduled here.
void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");
  addPass(&PHIEliminationID);
  addPass(&TwoAddressInstructionPassID);
}
323 
// Optimized-regalloc slot: runs the pre-regalloc machine passes but, since
// NVPTX has no allocator, the pipeline stops after stack slot coloring.
void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");

  addPass(&ProcessImplicitDefsID);
  addPass(&LiveVariablesID);
  addPass(&MachineLoopInfoID);
  addPass(&PHIEliminationID);

  addPass(&TwoAddressInstructionPassID);
  addPass(&RegisterCoalescerID);

  // PreRA instruction scheduling.
  if (addPass(&MachineSchedulerID))
    printAndVerify("After Machine Scheduling");


  addPass(&StackSlotColoringID);

  // FIXME: Needs physical registers
  //addPass(&PostRAMachineLICMID);

  printAndVerify("After StackSlotColoring");
}
347 
// Machine-SSA optimization pipeline for NVPTX.
// NOTE(review): this appears to mirror the generic
// TargetPassConfig::addMachineSSAOptimization sequence — confirm against the
// base implementation before changing pass order.
void NVPTXPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  if (addPass(&EarlyTailDuplicateID))
    printAndVerify("After Pre-RegAlloc TailDuplicate");

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);
  printAndVerify("After codegen DCE pass");

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  if (addILPOpts())
    printAndVerify("After ILP optimizations");

  addPass(&MachineLICMID);
  addPass(&MachineCSEID);

  addPass(&MachineSinkingID);
  printAndVerify("After Machine LICM, CSE and Sinking passes");

  addPass(&PeepholeOptimizerID);
  printAndVerify("After codegen peephole optimization pass");
}
387