//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineScheduler.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIFixSGPRLiveRangesPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSILowerControlFlowPass(*PR);
}

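// Select the object-file lowering: AMDHSA targets get the HSA variant, all
// other OSes use the default AMDGPU object file handling.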
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.getOS() == Triple::AMDHSA)
    return make_unique<AMDGPUHSATargetObjectFile>();

  return make_unique<AMDGPUTargetObjectFile>();
}

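// Factory for the R600 scheduling strategy. This is also the default scheduler
// for pre-GCN subtargets (see AMDGPUPassConfig::createMachineScheduler below).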
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static std::string computeDataLayout(const Triple &TT) {
  std::string Ret = "e-p:32:32";

  if (TT.getArch() == Triple::amdgcn) {
    // 32-bit private, local, and region pointers. 64-bit global and constant.
    Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
  }

  Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
         "-v512:512-v1024:1024-v2048:2048-n32:64";

  return Ret;
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "";
}

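// Common target machine for R600 and GCN. It owns the object-file lowering and
// the subtarget, and marks the target as requiring structured control flow.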
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  setRequiresStructuredCFG(true);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() { }

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

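// AMDGPUPassConfig carries the passes common to both pipelines;
// R600PassConfig and GCNPassConfig add the generation-specific ones.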
namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {

    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    if (ST.enableSIScheduler())
      return createSIMachineScheduler(C);
    return nullptr;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  bool addInstSelector() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // End of anonymous namespace

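// Expose the AMDGPU TargetTransformInfo implementation so IR-level passes can
// query target-specific cost information.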
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}

void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}

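// When optimizing, run AMDGPU alloca promotion (to vector registers or LDS)
// before the generic CodeGenPrepare passes, with SROA to clean up afterwards.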
void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
  if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(&TM));
    addPass(createSROAPass());
  }
  TargetPassConfig::addCodeGenPrepare();
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}

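// After instruction selection: rewrite illegal i1 copies, fix VGPR-to-SGPR
// copies, and fold operands into their uses.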
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  addPass(createSIFoldOperandsPass());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores the spill weights set by
  // the pass.
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
}


void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  addPass(&SIFixSGPRLiveRangesID);
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // We want to run this after LiveVariables is computed to avoid computing it
  // twice.
  // FIXME: We shouldn't disable the verifier here. r249087 introduced a failure
  // that needs to be fixed.
  insertPass(&LiveVariablesID, &SIFixSGPRLiveRangesID, /*VerifyAfter=*/false);
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

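// Run the instruction shrinking pass a second time once physical registers
// have been assigned.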
void GCNPassConfig::addPostRegAlloc() {
  addPass(createSIShrinkInstructionsPass(), false);
}

void GCNPassConfig::addPreSched2() {
}

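// Late machine passes: insert the required wait states and lower the pseudo
// control flow instructions just before emission.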
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIInsertWaitsPass(), false);
  addPass(createSILowerControlFlowPass(), false);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}