//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target machines.
  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
}

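/// The R600 and GCN target machines share one ELF-flavored TargetObjectFile
/// implementation, so the triple is currently unused here.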
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return make_unique<AMDGPUTargetObjectFile>();
}

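/// Drive the R600 scheduling strategy with the standard live-interval
/// machine scheduler.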
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}

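// Registering the schedulers makes them selectable by name on the command
// line, e.g. `llc -march=amdgcn -misched=si` (a usage sketch; by default the
// choice is made in AMDGPUPassConfig::createMachineScheduler() below).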
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

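/// Construct the data-layout string. The pointer sizes encode this backend's
/// address-space numbering: p (0) private, p1 global, p2 constant, p3 local,
/// p4 flat and p5 region, matching the comments below.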
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "";
}

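/// AMDGPU defaults to PIC relocation when no model is explicitly requested.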
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    return Reloc::PIC_;
  return *RM;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this), IntrinsicInfo() {
  setRequiresStructuredCFG(true);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() { }

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

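// The pass configs mirror the target machine hierarchy: AMDGPUPassConfig
// carries the passes common to both subtargets, and R600PassConfig /
// GCNPassConfig layer the hardware-specific pipelines on top of it.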
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {

    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

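  // Use the R600 scheduler for pre-GCN subtargets and the SI scheduler when
  // the subtarget asks for it; returning null selects the default machine
  // scheduler.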
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    if (ST.enableSIScheduler())
      return createSIMachineScheduler(C);
    return nullptr;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // End of anonymous namespace

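// Expose a function-level TTI so the IR optimizer can query AMDGPU-specific
// costs and properties.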
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}

void AMDGPUPassConfig::addIRPasses() {
  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}

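// When optimizing, run the target's alloca promotion (to vectors or LDS)
// before instruction selection, then rerun SROA to clean up whatever the
// promotion leaves behind in private memory.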
void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
  if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(&TM));
    addPass(createSROAPass());
  }
  TargetPassConfig::addCodeGenPrepare();
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}


//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

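// Final R600 lowering: structurize the machine CFG, expand the remaining
// pseudo instructions, bundle and packetize the VLIW slots, then emit the
// finalized control flow.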
void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
}

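// After instruction selection, rewrite i1 copies and repair the illegal
// VGPR-to-SGPR copies the selector may have introduced.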
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

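// GlobalISel support is still skeletal here: IR translation is wired up, but
// addRegBankSelect() deliberately adds no pass yet.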
#ifdef LLVM_BUILD_GLOBAL_ISEL
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  return false;
}
#endif

void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
  addPass(createSIWholeQuadModePass());
}

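// Both register-allocation hooks currently just forward to the base
// implementation; they are kept as GCN-specific override points.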
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPreSched2() {
}


void GCNPassConfig::addPreEmitPass() {
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee that it can handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(createSIInsertWaitsPass(), false);
  addPass(createSIShrinkInstructionsPass());
  addPass(createSILowerControlFlowPass(), false);
  addPass(createSIDebuggerInsertNopsPass(), false);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}