1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief The AMDGPU target machine contains all of the hardware specific
12 /// information needed to emit code for R600 and SI GPUs.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUTargetMachine.h"
17 #include "AMDGPUTargetObjectFile.h"
18 #include "AMDGPU.h"
19 #include "AMDGPUTargetTransformInfo.h"
20 #include "R600ISelLowering.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineScheduler.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "llvm/Analysis/Passes.h"
26 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/IR/Verifier.h"
31 #include "llvm/MC/MCAsmInfo.h"
32 #include "llvm/IR/LegacyPassManager.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_os_ostream.h"
35 #include "llvm/Transforms/IPO.h"
36 #include "llvm/Transforms/Scalar.h"
37 #include <llvm/CodeGen/Passes.h>
38 
39 using namespace llvm;
40 
41 extern "C" void LLVMInitializeAMDGPUTarget() {
42   // Register the target
43   RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
44   RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
45 
46   PassRegistry *PR = PassRegistry::getPassRegistry();
47   initializeSILowerI1CopiesPass(*PR);
48   initializeSIFixSGPRCopiesPass(*PR);
49   initializeSIFoldOperandsPass(*PR);
50   initializeSIFixSGPRLiveRangesPass(*PR);
51   initializeSIFixControlFlowLiveIntervalsPass(*PR);
52   initializeSILoadStoreOptimizerPass(*PR);
53   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
54   initializeAMDGPUAnnotateUniformValuesPass(*PR);
55   initializeSIAnnotateControlFlowPass(*PR);
56 }
57 
58 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
59   if (TT.getOS() == Triple::AMDHSA)
60     return make_unique<AMDGPUHSATargetObjectFile>();
61 
62   return make_unique<AMDGPUTargetObjectFile>();
63 }
64 
65 static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
66   return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
67 }
68 
// Expose the custom schedulers on the command line (-misched=r600 / -misched=si).
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);
76 
77 static std::string computeDataLayout(const Triple &TT) {
78   std::string Ret = "e-p:32:32";
79 
80   if (TT.getArch() == Triple::amdgcn) {
81     // 32-bit private, local, and region pointers. 64-bit global and constant.
82     Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
83   }
84 
85   Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
86          "-v512:512-v1024:1024-v2048:2048-n32:64";
87 
88   return Ret;
89 }
90 
91 LLVM_READNONE
92 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
93   if (!GPU.empty())
94     return GPU;
95 
96   // HSA only supports CI+, so change the default GPU to a CI for HSA.
97   if (TT.getArch() == Triple::amdgcn)
98     return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
99 
100   return "";
101 }
102 
/// Common constructor for the R600 and GCN target machines.  The data layout
/// and the default GPU name are derived from the triple before delegating to
/// LLVMTargetMachine.
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  // Both sub-backends expect a structurized CFG (see the structurizer passes
  // added in the pass configs below).
  setRequiresStructuredCFG(true);
  initAsmInfo();
}
117 
118 AMDGPUTargetMachine::~AMDGPUTargetMachine() { }
119 
120 //===----------------------------------------------------------------------===//
121 // R600 Target Machine (R600 -> Cayman)
122 //===----------------------------------------------------------------------===//
123 
// NOTE(review): the FS and CPU parameter names here are swapped relative to
// the base-class signature (CPU, FS) at the top of this file.  The arguments
// are forwarded positionally, so the values still reach the right base
// parameters, but the names are misleading; they should be renamed together
// with the header declaration.
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef FS, StringRef CPU,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
129 
130 //===----------------------------------------------------------------------===//
131 // GCN Target Machine (SI+)
132 //===----------------------------------------------------------------------===//
133 
// NOTE(review): as with R600TargetMachine above, the FS and CPU parameter
// names are swapped relative to the base-class (CPU, FS) order.  Forwarding
// is positional so behavior is correct, but the names should be fixed along
// with the header.
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef FS, StringRef CPU,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
139 
140 //===----------------------------------------------------------------------===//
141 // AMDGPU Pass Setup
142 //===----------------------------------------------------------------------===//
143 
144 namespace {
145 class AMDGPUPassConfig : public TargetPassConfig {
146 public:
147   AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
148     : TargetPassConfig(TM, PM) {
149 
150     // Exceptions and StackMaps are not supported, so these passes will never do
151     // anything.
152     disablePass(&StackMapLivenessID);
153     disablePass(&FuncletLayoutID);
154   }
155 
156   AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
157     return getTM<AMDGPUTargetMachine>();
158   }
159 
160   ScheduleDAGInstrs *
161   createMachineScheduler(MachineSchedContext *C) const override {
162     const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
163     if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
164       return createR600MachineScheduler(C);
165     else if (ST.enableSIScheduler())
166       return createSIMachineScheduler(C);
167     return nullptr;
168   }
169 
170   void addIRPasses() override;
171   void addCodeGenPrepare() override;
172   bool addPreISel() override;
173   bool addInstSelector() override;
174   bool addGCPasses() override;
175 };
176 
/// Pass configuration for the R600 (pre-GCN) backend; see the implementations
/// in the "R600 Pass Setup" section below.
class R600PassConfig : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
187 
/// Pass configuration for the GCN (SI+) backend; see the implementations in
/// the "GCN Pass Setup" section below.
class GCNPassConfig : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  bool addInstSelector() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
201 
202 } // End of anonymous namespace
203 
204 TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
205   return TargetIRAnalysis([this](const Function &F) {
206     return TargetTransformInfo(
207         AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
208   });
209 }
210 
void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  // Run the target-independent IR passes after the AMDGPU-specific ones.
  TargetPassConfig::addIRPasses();
}
227 
void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  if (ST.isPromoteAllocaEnabled()) {
    // Promote allocas to vectors/LDS, then run SROA to clean up whatever
    // scalar accesses remain.
    addPass(createAMDGPUPromoteAlloca(ST));
    addPass(createSROAPass());
  }
  TargetPassConfig::addCodeGenPrepare();
}
236 
237 bool
238 AMDGPUPassConfig::addPreISel() {
239   const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
240   addPass(createFlattenCFGPass());
241   if (ST.IsIRStructurizerEnabled())
242     addPass(createStructurizeCFGPass());
243   return false;
244 }
245 
// Install the shared AMDGPU SelectionDAG instruction selector.
bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}
250 
bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}
255 
256 //===----------------------------------------------------------------------===//
257 // R600 Pass Setup
258 //===----------------------------------------------------------------------===//
259 
// Run the common pre-ISel passes, then replace R600 texture intrinsics.
bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}
265 
// Merge vector registers before register allocation.
void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}
269 
// Pass ordering here is significant: clause markers must be emitted before
// if-conversion and clause merging.  (The 'false' arguments suppress the
// machine verifier after each pass.)
void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}
277 
// Final lowering before emission.  Order matters: structurize the machine
// CFG, expand special instructions, finalize bundles, packetize, and only
// then finalize control flow.
void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}
285 
// Hand the codegen pipeline an R600-specific pass configuration.
TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}
289 
290 //===----------------------------------------------------------------------===//
291 // GCN Pass Setup
292 //===----------------------------------------------------------------------===//
293 
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);

  // Order matters: sink instructions first, rewrite SI types, annotate
  // control flow, and then mark uniform values for instruction selection.
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createSIAnnotateControlFlowPass());
  addPass(createAMDGPUAnnotateUniformValues());

  return false;
}
308 
// After the common ISel, clean up i1 copies, fix illegal VGPR-to-SGPR
// copies, and fold operands into their users.
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  addPass(createSIFoldOperandsPass());
  return false;
}
316 
void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated, which
    // is why the register coalescer is inserted right after it.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  // Shrink instructions to their 32-bit encodings where possible.
  addPass(createSIShrinkInstructionsPass(), false);
}
338 
// For the fast register allocator, fix SGPR live ranges immediately before
// delegating to the default allocation pipeline.
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  addPass(&SIFixSGPRLiveRangesID);
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
343 
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // We want to run this after LiveVariables is computed to avoid computing them
  // twice.
  // FIXME: We shouldn't disable the verifier here. r249087 introduced a failure
  // that needs to be fixed.
  insertPass(&LiveVariablesID, &SIFixSGPRLiveRangesID, /*VerifyAfter=*/false);
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}
352 
// Run instruction shrinking again after register allocation, since allocation
// can expose more shrinkable instructions.
void GCNPassConfig::addPostRegAlloc() {
  addPass(createSIShrinkInstructionsPass(), false);
}
356 
// GCN adds no passes before the second scheduling phase.
void GCNPassConfig::addPreSched2() {
}
359 
// Final GCN lowering: insert wait states, then lower the pseudo control-flow
// instructions to real machine instructions.
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIInsertWaits(*TM), false);
  addPass(createSILowerControlFlowPass(*TM), false);
}
364 
// Hand the codegen pipeline a GCN-specific pass configuration.
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}
368