1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief The AMDGPU target machine contains all of the hardware specific
12 /// information  needed to emit code for R600 and SI GPUs.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUTargetMachine.h"
17 #include "AMDGPUTargetObjectFile.h"
18 #include "AMDGPU.h"
19 #include "AMDGPUTargetTransformInfo.h"
20 #include "R600ISelLowering.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineScheduler.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "llvm/Analysis/Passes.h"
26 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
27 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
31 #include "llvm/CodeGen/TargetPassConfig.h"
32 #include "llvm/IR/Verifier.h"
33 #include "llvm/MC/MCAsmInfo.h"
34 #include "llvm/IR/LegacyPassManager.h"
35 #include "llvm/Support/TargetRegistry.h"
36 #include "llvm/Support/raw_os_ostream.h"
37 #include "llvm/Transforms/IPO.h"
38 #include "llvm/Transforms/Scalar.h"
39 #include <llvm/CodeGen/Passes.h>
40 
41 using namespace llvm;
42 
43 extern "C" void LLVMInitializeAMDGPUTarget() {
44   // Register the target
45   RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
46   RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
47 
48   PassRegistry *PR = PassRegistry::getPassRegistry();
49   initializeSILowerI1CopiesPass(*PR);
50   initializeSIFixSGPRCopiesPass(*PR);
51   initializeSIFoldOperandsPass(*PR);
52   initializeSIFixControlFlowLiveIntervalsPass(*PR);
53   initializeSILoadStoreOptimizerPass(*PR);
54   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
55   initializeAMDGPUAnnotateUniformValuesPass(*PR);
56   initializeAMDGPUPromoteAllocaPass(*PR);
57   initializeSIAnnotateControlFlowPass(*PR);
58   initializeSIDebuggerInsertNopsPass(*PR);
59   initializeSIInsertWaitsPass(*PR);
60   initializeSIWholeQuadModePass(*PR);
61   initializeSILowerControlFlowPass(*PR);
62 }
63 
64 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
65   return make_unique<AMDGPUTargetObjectFile>();
66 }
67 
68 static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
69   return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
70 }
71 
// Make the custom schedulers selectable via -misched=r600 / -misched=si.
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);
79 
80 static std::string computeDataLayout(const Triple &TT) {
81   std::string Ret = "e-p:32:32";
82 
83   if (TT.getArch() == Triple::amdgcn) {
84     // 32-bit private, local, and region pointers. 64-bit global and constant.
85     Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
86   }
87 
88   Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
89          "-v512:512-v1024:1024-v2048:2048-n32:64";
90 
91   return Ret;
92 }
93 
94 LLVM_READNONE
95 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
96   if (!GPU.empty())
97     return GPU;
98 
99   // HSA only supports CI+, so change the default GPU to a CI for HSA.
100   if (TT.getArch() == Triple::amdgcn)
101     return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
102 
103   return "";
104 }
105 
/// Common constructor for all AMDGPU target machines. Resolves the CPU name
/// (falling back to a per-triple default), builds the data layout, and wires
/// up the object-file lowering and subtarget.
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  // Structured CFG is required: the hardware has no arbitrary branching for
  // divergent control flow, so later structurization passes depend on it.
  setRequiresStructuredCFG(true);
  initAsmInfo();
}
120 
121 AMDGPUTargetMachine::~AMDGPUTargetMachine() { }
122 
123 //===----------------------------------------------------------------------===//
124 // R600 Target Machine (R600 -> Cayman)
125 //===----------------------------------------------------------------------===//
126 
/// R600 (VLIW, R600 through Cayman) target machine; all setup is shared with
/// the common AMDGPU base class.
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
132 
133 //===----------------------------------------------------------------------===//
134 // GCN Target Machine (SI+)
135 //===----------------------------------------------------------------------===//
136 
/// GCN (SI and newer) target machine; all setup is shared with the common
/// AMDGPU base class.
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
142 
143 //===----------------------------------------------------------------------===//
144 // AMDGPU Pass Setup
145 //===----------------------------------------------------------------------===//
146 
147 namespace {
148 
149 class AMDGPUPassConfig : public TargetPassConfig {
150 public:
151   AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
152     : TargetPassConfig(TM, PM) {
153 
154     // Exceptions and StackMaps are not supported, so these passes will never do
155     // anything.
156     disablePass(&StackMapLivenessID);
157     disablePass(&FuncletLayoutID);
158   }
159 
160   AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
161     return getTM<AMDGPUTargetMachine>();
162   }
163 
164   ScheduleDAGInstrs *
165   createMachineScheduler(MachineSchedContext *C) const override {
166     const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
167     if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
168       return createR600MachineScheduler(C);
169     else if (ST.enableSIScheduler())
170       return createSIMachineScheduler(C);
171     return nullptr;
172   }
173 
174   void addIRPasses() override;
175   void addCodeGenPrepare() override;
176   bool addPreISel() override;
177   bool addInstSelector() override;
178   bool addGCPasses() override;
179 };
180 
/// R600-specific pass pipeline hooks (see definitions below).
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
191 
/// GCN-specific pass pipeline hooks (see definitions below). GlobalISel
/// hooks are only declared when LLVM is built with GlobalISel support.
class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
209 
210 } // End of anonymous namespace
211 
212 TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
213   return TargetIRAnalysis([this](const Function &F) {
214     return TargetTransformInfo(
215         AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
216   });
217 }
218 
/// Add target-specific IR passes that must run before the generic ones.
void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}
235 
236 void AMDGPUPassConfig::addCodeGenPrepare() {
237   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
238   const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
239   if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
240     addPass(createAMDGPUPromoteAlloca(&TM));
241     addPass(createSROAPass());
242   }
243   TargetPassConfig::addCodeGenPrepare();
244 }
245 
246 bool
247 AMDGPUPassConfig::addPreISel() {
248   addPass(createFlattenCFGPass());
249   return false;
250 }
251 
/// Install the shared AMDGPU SelectionDAG instruction selector.
bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}
256 
/// Override to suppress the default garbage-collection lowering passes.
bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}
261 
262 //===----------------------------------------------------------------------===//
263 // R600 Pass Setup
264 //===----------------------------------------------------------------------===//
265 
266 bool R600PassConfig::addPreISel() {
267   AMDGPUPassConfig::addPreISel();
268   const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
269   if (ST.IsIRStructurizerEnabled())
270     addPass(createStructurizeCFGPass());
271   addPass(createR600TextureIntrinsicsReplacer());
272   return false;
273 }
274 
/// Merge copies feeding R600 vector registers before register allocation.
void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}
278 
279 void R600PassConfig::addPreSched2() {
280   const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
281   addPass(createR600EmitClauseMarkers(), false);
282   if (ST.isIfCvtEnabled())
283     addPass(&IfConverterID, false);
284   addPass(createR600ClauseMergePass(*TM), false);
285 }
286 
/// Final R600 lowering before emission. The order is significant: the CFG
/// must be structurized before special-instruction expansion, bundles must
/// be finalized before packetizing, and control flow is finalized last.
void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}
294 
295 TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
296   return new R600PassConfig(this, PM);
297 }
298 
299 //===----------------------------------------------------------------------===//
300 // GCN Pass Setup
301 //===----------------------------------------------------------------------===//
302 
/// GCN pre-ISel pipeline. Pass order is significant: structurization must
/// precede control-flow annotation, and uniformity analysis feeds both.
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}
317 
/// Extend the generic machine-SSA optimizations with SI operand folding
/// followed by dead-code cleanup.
void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
}
331 
/// Run the common selector, then SI-specific cleanups: lower i1 copies to
/// hardware-legal forms and repair illegal VGPR-to-SGPR copies.
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}
338 
#ifdef LLVM_BUILD_GLOBAL_ISEL
/// Install the generic IR-to-MIR translator for GlobalISel builds.
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

/// Register-bank selection is not implemented for AMDGPU yet; returning
/// false reports success so the GlobalISel pipeline can proceed.
bool GCNPassConfig::addRegBankSelect() {
  return false;
}
#endif
349 
/// Passes that must run between machine scheduling and register allocation.
/// insertPass ordering here is deliberate; see the per-pass comments.
void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
  addPass(createSIWholeQuadModePass());
}
372 
/// No GCN-specific additions to the fast register-allocation pipeline;
/// the override exists as an explicit extension point.
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
376 
/// No GCN-specific additions to the optimized register-allocation pipeline;
/// the override exists as an explicit extension point.
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}
380 
/// Intentionally empty: GCN adds no passes before the second scheduler.
void GCNPassConfig::addPreSched2() {
}
383 
/// Final SI lowering before emission. Pass order is significant: waits must
/// be inserted before control-flow lowering, and debugger nops go last.
void GCNPassConfig::addPreEmitPass() {

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because
  // if there are multiple scheduling regions in a basic block, the regions
  // are scheduled bottom up, so when we begin to schedule a region we don't
  // know what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(createSIInsertWaitsPass(), false);
  addPass(createSIShrinkInstructionsPass());
  addPass(createSILowerControlFlowPass(), false);
  addPass(createSIDebuggerInsertNopsPass(), false);
}
401 
402 TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
403   return new GCNPassConfig(this, PM);
404 }
405