1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief The AMDGPU target machine contains all of the hardware specific
12 /// information  needed to emit code for R600 and SI GPUs.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUTargetMachine.h"
17 #include "AMDGPUTargetObjectFile.h"
18 #include "AMDGPU.h"
19 #include "AMDGPUTargetTransformInfo.h"
20 #include "R600ISelLowering.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineScheduler.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "llvm/Analysis/Passes.h"
26 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
27 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/MachineModuleInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/IR/Verifier.h"
32 #include "llvm/MC/MCAsmInfo.h"
33 #include "llvm/IR/LegacyPassManager.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_os_ostream.h"
36 #include "llvm/Transforms/IPO.h"
37 #include "llvm/Transforms/Scalar.h"
38 #include <llvm/CodeGen/Passes.h>
39 
40 using namespace llvm;
41 
42 extern "C" void LLVMInitializeAMDGPUTarget() {
43   // Register the target
44   RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
45   RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
46 
47   PassRegistry *PR = PassRegistry::getPassRegistry();
48   initializeSILowerI1CopiesPass(*PR);
49   initializeSIFixSGPRCopiesPass(*PR);
50   initializeSIFoldOperandsPass(*PR);
51   initializeSIFixControlFlowLiveIntervalsPass(*PR);
52   initializeSILoadStoreOptimizerPass(*PR);
53   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
54   initializeAMDGPUAnnotateUniformValuesPass(*PR);
55   initializeAMDGPUPromoteAllocaPass(*PR);
56   initializeSIAnnotateControlFlowPass(*PR);
57   initializeSIInsertNopsPass(*PR);
58   initializeSIInsertWaitsPass(*PR);
59   initializeSIWholeQuadModePass(*PR);
60   initializeSILowerControlFlowPass(*PR);
61 }
62 
63 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
64   if (TT.getOS() == Triple::AMDHSA)
65     return make_unique<AMDGPUHSATargetObjectFile>();
66 
67   return make_unique<AMDGPUTargetObjectFile>();
68 }
69 
/// Factory used by the scheduler registry below: wraps the custom R600
/// strategy in the generic live-interval-tracking machine scheduler.
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}
73 
// Register the custom schedulers under the names "r600" and "si" so they can
// be selected on the command line.
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);
81 
82 static std::string computeDataLayout(const Triple &TT) {
83   std::string Ret = "e-p:32:32";
84 
85   if (TT.getArch() == Triple::amdgcn) {
86     // 32-bit private, local, and region pointers. 64-bit global and constant.
87     Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
88   }
89 
90   Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
91          "-v512:512-v1024:1024-v2048:2048-n32:64";
92 
93   return Ret;
94 }
95 
96 LLVM_READNONE
97 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
98   if (!GPU.empty())
99     return GPU;
100 
101   // HSA only supports CI+, so change the default GPU to a CI for HSA.
102   if (TT.getArch() == Triple::amdgcn)
103     return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
104 
105   return "";
106 }
107 
/// Common target machine for R600 and GCN.  Substitutes a default GPU via
/// getGPUOrDefault() when no CPU string is given, so the subtarget is always
/// constructed with a concrete processor.
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  // The backend relies on structured control flow (see the structurizer
  // passes added in the pass configs below).
  setRequiresStructuredCFG(true);
  initAsmInfo();
}
122 
123 AMDGPUTargetMachine::~AMDGPUTargetMachine() { }
124 
125 //===----------------------------------------------------------------------===//
126 // R600 Target Machine (R600 -> Cayman)
127 //===----------------------------------------------------------------------===//
128 
/// Target machine for the R600 family (R600 through Cayman); all construction
/// work is done by the common AMDGPUTargetMachine base.
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
134 
135 //===----------------------------------------------------------------------===//
136 // GCN Target Machine (SI+)
137 //===----------------------------------------------------------------------===//
138 
/// Target machine for GCN GPUs (SI and newer); all construction work is done
/// by the common AMDGPUTargetMachine base.
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
144 
145 //===----------------------------------------------------------------------===//
146 // AMDGPU Pass Setup
147 //===----------------------------------------------------------------------===//
148 
149 namespace {
150 
151 class AMDGPUPassConfig : public TargetPassConfig {
152 public:
153   AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
154     : TargetPassConfig(TM, PM) {
155 
156     // Exceptions and StackMaps are not supported, so these passes will never do
157     // anything.
158     disablePass(&StackMapLivenessID);
159     disablePass(&FuncletLayoutID);
160   }
161 
162   AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
163     return getTM<AMDGPUTargetMachine>();
164   }
165 
166   ScheduleDAGInstrs *
167   createMachineScheduler(MachineSchedContext *C) const override {
168     const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
169     if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
170       return createR600MachineScheduler(C);
171     else if (ST.enableSIScheduler())
172       return createSIMachineScheduler(C);
173     return nullptr;
174   }
175 
176   void addIRPasses() override;
177   void addCodeGenPrepare() override;
178   bool addPreISel() override;
179   bool addInstSelector() override;
180   bool addGCPasses() override;
181 };
182 
/// Pass configuration for the R600 family; overrides only the hooks where
/// R600-specific machine passes are inserted.
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
193 
/// Pass configuration for GCN (SI+); overrides the hooks where SI-specific
/// passes are inserted.  The GlobalISel hooks exist only when LLVM is built
/// with LLVM_BUILD_GLOBAL_ISEL.
class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
211 
212 } // End of anonymous namespace
213 
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  // Construct a fresh AMDGPUTTIImpl per function so it sees the data layout
  // of the function's own module.
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}
220 
void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}
237 
238 void AMDGPUPassConfig::addCodeGenPrepare() {
239   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
240   const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
241   if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
242     addPass(createAMDGPUPromoteAlloca(&TM));
243     addPass(createSROAPass());
244   }
245   TargetPassConfig::addCodeGenPrepare();
246 }
247 
248 bool
249 AMDGPUPassConfig::addPreISel() {
250   addPass(createFlattenCFGPass());
251   return false;
252 }
253 
bool AMDGPUPassConfig::addInstSelector() {
  // SelectionDAG instruction selector shared by R600 and GCN.
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}
258 
bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}
263 
264 //===----------------------------------------------------------------------===//
265 // R600 Pass Setup
266 //===----------------------------------------------------------------------===//
267 
268 bool R600PassConfig::addPreISel() {
269   AMDGPUPassConfig::addPreISel();
270   const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
271   if (ST.IsIRStructurizerEnabled())
272     addPass(createStructurizeCFGPass());
273   addPass(createR600TextureIntrinsicsReplacer());
274   return false;
275 }
276 
void R600PassConfig::addPreRegAlloc() {
  // Run the R600 vector register merger before register allocation.
  addPass(createR600VectorRegMerger(*TM));
}
280 
void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  // The trailing 'false' is addPass's verifyAfter flag: skip machine
  // verification after these passes.
  addPass(createR600EmitClauseMarkers(), false);
  // If-conversion is gated on the subtarget setting.
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}
288 
void R600PassConfig::addPreEmitPass() {
  // Final lowering sequence before emission; the order of these passes
  // matters (structurize, expand, bundle, packetize, finalize control flow).
  // The trailing 'false' is addPass's verifyAfter flag.
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}
296 
/// Hand the pass manager an R600-specific pass configuration.
TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}
300 
301 //===----------------------------------------------------------------------===//
302 // GCN Pass Setup
303 //===----------------------------------------------------------------------===//
304 
bool GCNPassConfig::addPreISel() {
  // Common AMDGPU pre-ISel passes first (FlattenCFG).
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  // Uniformity annotation must precede SIAnnotateControlFlow, which consumes
  // the structurized CFG produced above.
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}
319 
/// Runs the generic machine-SSA optimizations, then SI operand folding and a
/// dead-instruction sweep over its leftovers.
void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
}
333 
334 bool GCNPassConfig::addInstSelector() {
335   AMDGPUPassConfig::addInstSelector();
336   addPass(createSILowerI1CopiesPass());
337   addPass(&SIFixSGPRCopiesID);
338   return false;
339 }
340 
#ifdef LLVM_BUILD_GLOBAL_ISEL
// GlobalISel hooks; compiled only when LLVM is built with
// LLVM_BUILD_GLOBAL_ISEL.
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

// No register bank selection is performed for AMDGPU yet.
bool GCNPassConfig::addRegBankSelect() {
  return false;
}
#endif
351 
void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  // 'false' here is addPass's verifyAfter flag.
  addPass(createSIShrinkInstructionsPass(), false);
  addPass(createSIWholeQuadModePass());
}
374 
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // No GCN-specific additions on the fast (-O0) register allocation path.
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
378 
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // No GCN-specific additions on the optimized register allocation path.
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}
382 
void GCNPassConfig::addPreSched2() {
  // Intentionally empty: GCN adds no passes at this hook.
}
385 
void GCNPassConfig::addPreEmitPass() {

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly.  This is because
  // if there are multiple scheduling regions in a basic block, the regions
  // are scheduled bottom up, so when we begin to schedule a region we don't
  // know what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  // Final SI lowering; the trailing 'false' is addPass's verifyAfter flag.
  addPass(createSIInsertWaitsPass(), false);
  addPass(createSIShrinkInstructionsPass());
  addPass(createSILowerControlFlowPass(), false);
  addPass(createSIInsertNopsPass(), false);
}
403 
/// Hand the pass manager a GCN-specific pass configuration.
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}
407