//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable the vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control scalarization of global loads.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all functions early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

// Enable address-space-based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run the late CFG structurizer. Note that cl::location stores the
// parsed value in AMDGPUTargetMachine::EnableLateStructurizeCFG rather than
// in the option object itself.
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::Hidden,
  cl::desc("Enable AMDGPU function call support"),
  cl::init(false));

// Enable library call simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the two targets: R600 for pre-GCN GPUs and GCN for SI and later.
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

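// Note: any of the schedulers registered above can be selected at run time
// with llc's -misched=<name> option, e.g. -misched=gcn-ilp.
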
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant, and
  // flat.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
  if (CM)
    return *CM;
  return CodeModel::Small;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  AS = AMDGPU::getAMDGPUAS(TT);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;

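/// Return the GPU name to use for \p F: the value of its "target-cpu"
/// attribute when present, otherwise the target machine's default CPU.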
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

static ImmutablePass *createAMDGPUExternalAAWrapperPass() {
  return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
      if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
        AAR.addAAResult(WrapperPass->getResult());
      });
}

/// Predicate for Internalize pass.
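/// Returns true for globals that Internalize must keep externally visible:
/// function declarations, entry functions such as kernels, and any other
/// global that still has uses. The rest may be internalized and then removed
/// by the GlobalDCE run that follows internalization.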
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  if (Internalize) {
    // If we're generating code, we always have the whole program available. The
    // relocations expected for externally visible functions aren't supported,
    // so make sure every non-entry function is hidden.
    Builder.addExtension(
      PassManagerBuilder::EP_EnabledOnOptLevel0,
      [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        PM.add(createInternalizePass(mustPreserveGV));
      });
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

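// For reference, a frontend driver using the legacy pass manager wires these
// extensions up roughly like this (a minimal sketch, not code in this file;
// the registered extensions fire while the Builder populates the pipeline):
//
//   legacy::PassManager PM;
//   PassManagerBuilder Builder;
//   TM->adjustPassManager(Builder);         // installs the extensions above
//   Builder.populateModulePassManager(PM);
//   PM.run(M);
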
//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

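  // Subtargets are cached per (GPU, feature string) key, so functions with
  // identical target attributes share a single subtarget instance.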
  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget, since the
    // creation depends on the TM and on the per-function code generation
    // flags that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget, since the
    // creation depends on the TM and on the per-function code generation
    // flags that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
  }

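  // Propagate the -amdgpu-scalarize-global-loads setting to the subtarget.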
  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
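    // Cluster neighboring loads and stores during scheduling so they remain
    // adjacent and are easier to combine later, e.g. by the SI load/store
    // optimizer.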
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
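  // Illustrative sketch (not taken from that test): SLSR rewrites related
  // address computations such as
  //   p0 = &base[i];  p1 = &base[i + 1];
  // so that p1 is derived from p0, producing common subexpressions that the
  // EarlyCSE/GVN run below can clean up.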
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPULowerIntrinsicsPass());

  if (TM.getTargetTriple().getArch() == Triple::r600 ||
      !EnableAMDGPUFunctionCalls) {
    // Function calls are not supported, so make sure we inline everything.
    addPass(createAMDGPUAlwaysInlinePass());
    addPass(createAlwaysInlinerLegacyPass());
    // We need to add the barrier noop pass; otherwise adding the function
    // inlining pass will cause all of the PassConfig's passes to be run
    // one function at a time, which means that if we have a module with two
    // functions, then we will generate code for the first function
    // without ever running any passes on the second.
    addPass(createBarrierNoopPass());
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee that it can handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(&SIInsertSkipsPassID);
  addPass(createSIDebuggerInsertNopsPass());
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}