//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware-specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

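  // Register the target-specific passes with the PassRegistry so they can be
  // looked up by name and their analysis dependencies are known to the pass
  // manager.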
  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIFixSGPRLiveRangesPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
}

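// Pick the object-file lowering: AMDHSA targets use the HSA object file
// format, everything else uses the default AMDGPU one.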
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.getOS() == Triple::AMDHSA)
    return make_unique<AMDGPUHSATargetObjectFile>();

  return make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

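// Construct the data layout string: 32-bit pointers by default, 64-bit
// global/constant pointers on amdgcn, plus the integer and vector alignments
// shared by all subtargets.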
static std::string computeDataLayout(const Triple &TT) {
  std::string Ret = "e-p:32:32";

  if (TT.getArch() == Triple::amdgcn) {
    // 32-bit private, local, and region pointers. 64-bit global and constant.
    Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
  }

  Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
         "-v512:512-v1024:1024-v2048:2048-n32:64";

  return Ret;
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "";
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  setRequiresStructuredCFG(true);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() { }

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef FS, StringRef CPU,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef FS, StringRef CPU,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {

    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

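  // Pre-GCN subtargets (up to Northern Islands) always use the R600 scheduler.
  // GCN subtargets use the SI scheduler only when it is explicitly enabled and
  // otherwise fall back to the default machine scheduler (nullptr).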
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    else if (ST.enableSIScheduler())
      return createSIMachineScheduler(C);
    return nullptr;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

177 
178 class R600PassConfig : public AMDGPUPassConfig {
179 public:
180   R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
181     : AMDGPUPassConfig(TM, PM) { }
182 
183   bool addPreISel() override;
184   void addPreRegAlloc() override;
185   void addPreSched2() override;
186   void addPreEmitPass() override;
187 };
188 
189 class GCNPassConfig : public AMDGPUPassConfig {
190 public:
191   GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
192     : AMDGPUPassConfig(TM, PM) { }
193   bool addPreISel() override;
194   bool addInstSelector() override;
195   void addFastRegAlloc(FunctionPass *RegAllocPass) override;
196   void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
197   void addPreRegAlloc() override;
198   void addPostRegAlloc() override;
199   void addPreSched2() override;
200   void addPreEmitPass() override;
201 };
202 
203 } // End of anonymous namespace
204 
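// Hook the AMDGPU TargetTransformInfo implementation into the IR-level
// analysis machinery so passes query AMDGPU-specific cost information.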
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}

void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
  if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(&TM));
    addPass(createSROAPass());
  }
  TargetPassConfig::addCodeGenPrepare();
}

bool AMDGPUPassConfig::addPreISel() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createFlattenCFGPass());
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);

  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createSIAnnotateControlFlowPass());
  addPass(createAMDGPUAnnotateUniformValues());

  return false;
}

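// SI-specific cleanups that must follow instruction selection: lower i1
// copies, repair illegal VGPR-to-SGPR copies, and fold operands into their
// uses.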
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  addPass(createSIFoldOperandsPass());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
}


void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  addPass(&SIFixSGPRLiveRangesID);
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // We want to run this after LiveVariables has been computed to avoid
  // computing it twice.
  // FIXME: We shouldn't disable the verifier here. r249087 introduced a failure
  // that needs to be fixed.
  insertPass(&LiveVariablesID, &SIFixSGPRLiveRangesID, /*VerifyAfter=*/false);
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(createSIShrinkInstructionsPass(), false);
}

void GCNPassConfig::addPreSched2() {
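  // No GCN-specific passes are needed before the second scheduling pass.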
}

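// Final lowering before emission: insert the required wait states and lower
// the control-flow pseudo instructions.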
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIInsertWaits(*TM), false);
  addPass(createSILowerControlFlowPass(*TM), false);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}