//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable the vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control scalarization of global loads.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all functions early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run the late CFG structurizer.
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable libcall simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization.
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization.
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Option used in lit tests to prevent dead-coding of the patterns being
// inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);
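
// All of the cl::opt flags above are developer and testing knobs rather than
// stable user-facing options. As a sketch (the exact invocation depends on the
// test setup), a lit test could disable the SDWA peephole with:
//   llc -mtriple=amdgcn-- -amdgpu-sdwa-peephole=0 ...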

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}
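
// All AMDGPU subtargets share a single TargetLoweringObjectFile
// implementation; the triple is currently unused here.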
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}
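
// Factory functions for the machine schedulers that are registered with
// MachineSchedRegistry below.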
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
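
// Any of the strategies registered above can be selected at run time through
// llc's -misched=<name> option (for example -misched=gcn-ilp); these are
// intended for experimentation rather than as stable interfaces.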
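// Construct the DataLayout string for the target. Note that "A5" declares
// address space 5 (private) as the alloca address space, and "-ni:7" marks
// the buffer-fat-pointer address space as non-integral.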
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for the Internalize pass: returns true for symbols that must be
/// preserved, i.e. declarations, entry-point (kernel) functions, and globals
/// that still have uses.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}
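
// Hook the AMDGPU-specific IR passes into the standard optimization pipeline
// built by the (legacy) PassManagerBuilder.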
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace
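
// At CodeGenOpt::Aggressive, use the more powerful (and more expensive) GVN
// for redundancy elimination; otherwise the cheaper EarlyCSE is sufficient.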
void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means that if we have a module with two
  // functions, we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}
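
// Lower switches to branch chains and flatten the CFG before instruction
// selection.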
bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//
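
// Use the SI machine scheduler when the subtarget requests it (e.g. via the
// "si-scheduler" subtarget feature); otherwise default to the GCN
// max-occupancy scheduler.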
ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}
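
// The optimized register allocation pipeline inserts the same control-flow
// lowering and WWM pre-allocation passes as the fast pipeline, plus the
// optional pre-RA exec mask optimization, memory clause formation, and
// dead-code elimination passes.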
void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions as
  // needed. It would be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}
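
// MIR serialization hooks: create, convert, and parse the SI-specific machine
// function information used by the MIR printer and parser.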
yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);
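
  // Resolve a named register reference from its YAML string form, recording
  // the source range for a diagnostic if parsing fails.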
  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SReg_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  return false;
}