1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// The AMDGPU target machine contains all of the hardware specific
11 /// information needed to emit code for R600 and SI GPUs.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUTargetMachine.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUAliasAnalysis.h"
18 #include "AMDGPUExportClustering.h"
19 #include "AMDGPUMacroFusion.h"
20 #include "AMDGPUTargetObjectFile.h"
21 #include "AMDGPUTargetTransformInfo.h"
22 #include "GCNIterativeScheduler.h"
23 #include "GCNSchedStrategy.h"
24 #include "R600MachineScheduler.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIMachineScheduler.h"
27 #include "TargetInfo/AMDGPUTargetInfo.h"
28 #include "llvm/Analysis/CGSCCPassManager.h"
29 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
30 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
31 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
32 #include "llvm/CodeGen/GlobalISel/Localizer.h"
33 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
34 #include "llvm/CodeGen/MIRParser/MIParser.h"
35 #include "llvm/CodeGen/Passes.h"
36 #include "llvm/CodeGen/RegAllocRegistry.h"
37 #include "llvm/CodeGen/TargetPassConfig.h"
38 #include "llvm/IR/LegacyPassManager.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/Passes/PassBuilder.h"
42 #include "llvm/Support/TargetRegistry.h"
43 #include "llvm/Transforms/IPO.h"
44 #include "llvm/Transforms/IPO/AlwaysInliner.h"
45 #include "llvm/Transforms/IPO/GlobalDCE.h"
46 #include "llvm/Transforms/IPO/Internalize.h"
47 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
48 #include "llvm/Transforms/Scalar.h"
49 #include "llvm/Transforms/Scalar/GVN.h"
50 #include "llvm/Transforms/Scalar/InferAddressSpaces.h"
51 #include "llvm/Transforms/Utils.h"
52 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
53 #include "llvm/Transforms/Vectorize.h"
54 
55 using namespace llvm;
56 
57 namespace {
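// SGPRs and VGPRs are allocated by two separate register allocator runs (SGPRs
// first, then VGPRs), so the target keeps its own allocator registries for each
// bank instead of the common -regalloc option; see
// addRegAssignAndRewriteFast/Optimized below.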
58 class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
59 public:
60   SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
61     : RegisterRegAllocBase(N, D, C) {}
62 };
63 
64 class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
65 public:
66   VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
67     : RegisterRegAllocBase(N, D, C) {}
68 };
69 
70 static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
71                               const TargetRegisterClass &RC) {
72   return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
73 }
74 
75 static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
76                               const TargetRegisterClass &RC) {
77   return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
78 }
79 
80 
81 /// -{sgpr|vgpr}-regalloc=... command line option.
82 static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
83 
84 /// A dummy default pass factory is used to detect whether the register
85 /// allocator has been overridden on the command line.
86 static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
87 static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
88 
89 static SGPRRegisterRegAlloc
90 defaultSGPRRegAlloc("default",
91                     "pick SGPR register allocator based on -O option",
92                     useDefaultRegisterAllocator);
93 
94 static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
95                RegisterPassParser<SGPRRegisterRegAlloc>>
96 SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
97              cl::desc("Register allocator to use for SGPRs"));
98 
99 static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
100                RegisterPassParser<VGPRRegisterRegAlloc>>
101 VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
102              cl::desc("Register allocator to use for VGPRs"));
103 
104 
105 static void initializeDefaultSGPRRegisterAllocatorOnce() {
106   RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
107 
108   if (!Ctor) {
109     Ctor = SGPRRegAlloc;
110     SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
111   }
112 }
113 
114 static void initializeDefaultVGPRRegisterAllocatorOnce() {
115   RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
116 
117   if (!Ctor) {
118     Ctor = VGPRRegAlloc;
119     VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
120   }
121 }
122 
123 static FunctionPass *createBasicSGPRRegisterAllocator() {
124   return createBasicRegisterAllocator(onlyAllocateSGPRs);
125 }
126 
127 static FunctionPass *createGreedySGPRRegisterAllocator() {
128   return createGreedyRegisterAllocator(onlyAllocateSGPRs);
129 }
130 
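// The boolean passed to createFastRegisterAllocator is ClearVirtRegs: the SGPR
// allocator runs first and must leave virtual registers in place for the VGPR
// allocator, which runs last and is the one that clears them.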
131 static FunctionPass *createFastSGPRRegisterAllocator() {
132   return createFastRegisterAllocator(onlyAllocateSGPRs, false);
133 }
134 
135 static FunctionPass *createBasicVGPRRegisterAllocator() {
136   return createBasicRegisterAllocator(onlyAllocateVGPRs);
137 }
138 
139 static FunctionPass *createGreedyVGPRRegisterAllocator() {
140   return createGreedyRegisterAllocator(onlyAllocateVGPRs);
141 }
142 
143 static FunctionPass *createFastVGPRRegisterAllocator() {
144   return createFastRegisterAllocator(onlyAllocateVGPRs, true);
145 }
146 
147 static SGPRRegisterRegAlloc basicRegAllocSGPR(
148   "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
149 static SGPRRegisterRegAlloc greedyRegAllocSGPR(
150   "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
151 
152 static SGPRRegisterRegAlloc fastRegAllocSGPR(
153   "fast", "fast register allocator", createFastSGPRRegisterAllocator);
154 
155 
156 static VGPRRegisterRegAlloc basicRegAllocVGPR(
157   "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
158 static VGPRRegisterRegAlloc greedyRegAllocVGPR(
159   "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
160 
161 static VGPRRegisterRegAlloc fastRegAllocVGPR(
162   "fast", "fast register allocator", createFastVGPRRegisterAllocator);
163 } // end anonymous namespace
164 
165 
166 static cl::opt<bool> EnableR600StructurizeCFG(
167   "r600-ir-structurize",
168   cl::desc("Use StructurizeCFG IR pass"),
169   cl::init(true));
170 
171 static cl::opt<bool> EnableSROA(
172   "amdgpu-sroa",
173   cl::desc("Run SROA after promote alloca pass"),
174   cl::ReallyHidden,
175   cl::init(true));
176 
177 static cl::opt<bool>
178 EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
179                         cl::desc("Run early if-conversion"),
180                         cl::init(false));
181 
182 static cl::opt<bool>
183 OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
184             cl::desc("Run pre-RA exec mask optimizations"),
185             cl::init(true));
186 
187 static cl::opt<bool> EnableR600IfConvert(
188   "r600-if-convert",
189   cl::desc("Use if conversion pass"),
190   cl::ReallyHidden,
191   cl::init(true));
192 
193 // Option to disable vectorizer for tests.
194 static cl::opt<bool> EnableLoadStoreVectorizer(
195   "amdgpu-load-store-vectorizer",
196   cl::desc("Enable load store vectorizer"),
197   cl::init(true),
198   cl::Hidden);
199 
200 // Option to control global loads scalarization
201 static cl::opt<bool> ScalarizeGlobal(
202   "amdgpu-scalarize-global-loads",
203   cl::desc("Enable global load scalarization"),
204   cl::init(true),
205   cl::Hidden);
206 
207 // Option to run internalize pass.
208 static cl::opt<bool> InternalizeSymbols(
209   "amdgpu-internalize-symbols",
210   cl::desc("Enable elimination of non-kernel functions and unused globals"),
211   cl::init(false),
212   cl::Hidden);
213 
214 // Option to inline all early.
215 static cl::opt<bool> EarlyInlineAll(
216   "amdgpu-early-inline-all",
217   cl::desc("Inline all functions early"),
218   cl::init(false),
219   cl::Hidden);
220 
221 static cl::opt<bool> EnableSDWAPeephole(
222   "amdgpu-sdwa-peephole",
223   cl::desc("Enable SDWA peepholer"),
224   cl::init(true));
225 
226 static cl::opt<bool> EnableDPPCombine(
227   "amdgpu-dpp-combine",
228   cl::desc("Enable DPP combiner"),
229   cl::init(true));
230 
231 // Enable address space based alias analysis
232 static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
233   cl::desc("Enable AMDGPU Alias Analysis"),
234   cl::init(true));
235 
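// Several of the options below use cl::location to store their value directly
// in a static member of AMDGPUTargetMachine, so the rest of the backend can
// query the flag without referencing the cl::opt object.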
236 // Option to run late CFG structurizer
237 static cl::opt<bool, true> LateCFGStructurize(
238   "amdgpu-late-structurize",
239   cl::desc("Enable late CFG structurization"),
240   cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
241   cl::Hidden);
242 
243 static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
244   "amdgpu-function-calls",
245   cl::desc("Enable AMDGPU function call support"),
246   cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
247   cl::init(true),
248   cl::Hidden);
249 
250 static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt(
251   "amdgpu-fixed-function-abi",
252   cl::desc("Enable all implicit function arguments"),
253   cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI),
254   cl::init(false),
255   cl::Hidden);
256 
257 // Enable lib calls simplifications
258 static cl::opt<bool> EnableLibCallSimplify(
259   "amdgpu-simplify-libcall",
260   cl::desc("Enable amdgpu library simplifications"),
261   cl::init(true),
262   cl::Hidden);
263 
264 static cl::opt<bool> EnableLowerKernelArguments(
265   "amdgpu-ir-lower-kernel-arguments",
266   cl::desc("Lower kernel argument loads in IR pass"),
267   cl::init(true),
268   cl::Hidden);
269 
270 static cl::opt<bool> EnableRegReassign(
271   "amdgpu-reassign-regs",
272   cl::desc("Enable register reassign optimizations on gfx10+"),
273   cl::init(true),
274   cl::Hidden);
275 
276 static cl::opt<bool> OptVGPRLiveRange(
277     "amdgpu-opt-vgpr-liverange",
278     cl::desc("Enable VGPR liverange optimizations for if-else structure"),
279     cl::init(true), cl::Hidden);
280 
281 // Enable atomic optimization
282 static cl::opt<bool> EnableAtomicOptimizations(
283   "amdgpu-atomic-optimizations",
284   cl::desc("Enable atomic optimizations"),
285   cl::init(false),
286   cl::Hidden);
287 
288 // Enable Mode register optimization
289 static cl::opt<bool> EnableSIModeRegisterPass(
290   "amdgpu-mode-register",
291   cl::desc("Enable mode register pass"),
292   cl::init(true),
293   cl::Hidden);
294 
295 // Option used in lit tests to prevent dead-code elimination of the inspected patterns.
296 static cl::opt<bool>
297 EnableDCEInRA("amdgpu-dce-in-ra",
298     cl::init(true), cl::Hidden,
299     cl::desc("Enable machine DCE inside regalloc"));
300 
301 static cl::opt<bool> EnableScalarIRPasses(
302   "amdgpu-scalar-ir-passes",
303   cl::desc("Enable scalar IR passes"),
304   cl::init(true),
305   cl::Hidden);
306 
307 static cl::opt<bool> EnableStructurizerWorkarounds(
308     "amdgpu-enable-structurizer-workarounds",
309     cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
310     cl::Hidden);
311 
312 static cl::opt<bool> EnableLDSReplaceWithPointer(
313     "amdgpu-enable-lds-replace-with-pointer",
314     cl::desc("Enable LDS replace with pointer pass"), cl::init(false),
315     cl::Hidden);
316 
317 static cl::opt<bool, true> EnableLowerModuleLDS(
318     "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
319     cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
320     cl::Hidden);
321 
322 static cl::opt<bool> EnablePreRAOptimizations(
323     "amdgpu-enable-pre-ra-optimizations",
324     cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
325     cl::Hidden);
326 
327 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
328   // Register the target
329   RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
330   RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
331 
332   PassRegistry *PR = PassRegistry::getPassRegistry();
333   initializeR600ClauseMergePassPass(*PR);
334   initializeR600ControlFlowFinalizerPass(*PR);
335   initializeR600PacketizerPass(*PR);
336   initializeR600ExpandSpecialInstrsPassPass(*PR);
337   initializeR600VectorRegMergerPass(*PR);
338   initializeGlobalISel(*PR);
339   initializeAMDGPUDAGToDAGISelPass(*PR);
340   initializeGCNDPPCombinePass(*PR);
341   initializeSILowerI1CopiesPass(*PR);
342   initializeSILowerSGPRSpillsPass(*PR);
343   initializeSIFixSGPRCopiesPass(*PR);
344   initializeSIFixVGPRCopiesPass(*PR);
345   initializeSIFoldOperandsPass(*PR);
346   initializeSIPeepholeSDWAPass(*PR);
347   initializeSIShrinkInstructionsPass(*PR);
348   initializeSIOptimizeExecMaskingPreRAPass(*PR);
349   initializeSIOptimizeVGPRLiveRangePass(*PR);
350   initializeSILoadStoreOptimizerPass(*PR);
351   initializeAMDGPUFixFunctionBitcastsPass(*PR);
352   initializeAMDGPUCtorDtorLoweringPass(*PR);
353   initializeAMDGPUAlwaysInlinePass(*PR);
354   initializeAMDGPUAttributorPass(*PR);
355   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
356   initializeAMDGPUAnnotateUniformValuesPass(*PR);
357   initializeAMDGPUArgumentUsageInfoPass(*PR);
358   initializeAMDGPUAtomicOptimizerPass(*PR);
359   initializeAMDGPULowerKernelArgumentsPass(*PR);
360   initializeAMDGPULowerKernelAttributesPass(*PR);
361   initializeAMDGPULowerIntrinsicsPass(*PR);
362   initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
363   initializeAMDGPUPostLegalizerCombinerPass(*PR);
364   initializeAMDGPUPreLegalizerCombinerPass(*PR);
365   initializeAMDGPURegBankCombinerPass(*PR);
366   initializeAMDGPUPromoteAllocaPass(*PR);
367   initializeAMDGPUPromoteAllocaToVectorPass(*PR);
368   initializeAMDGPUCodeGenPreparePass(*PR);
369   initializeAMDGPULateCodeGenPreparePass(*PR);
370   initializeAMDGPUPropagateAttributesEarlyPass(*PR);
371   initializeAMDGPUPropagateAttributesLatePass(*PR);
372   initializeAMDGPUReplaceLDSUseWithPointerPass(*PR);
373   initializeAMDGPULowerModuleLDSPass(*PR);
374   initializeAMDGPURewriteOutArgumentsPass(*PR);
375   initializeAMDGPUUnifyMetadataPass(*PR);
376   initializeSIAnnotateControlFlowPass(*PR);
377   initializeSIInsertHardClausesPass(*PR);
378   initializeSIInsertWaitcntsPass(*PR);
379   initializeSIModeRegisterPass(*PR);
380   initializeSIWholeQuadModePass(*PR);
381   initializeSILowerControlFlowPass(*PR);
382   initializeSIPreEmitPeepholePass(*PR);
383   initializeSILateBranchLoweringPass(*PR);
384   initializeSIMemoryLegalizerPass(*PR);
385   initializeSIOptimizeExecMaskingPass(*PR);
386   initializeSIPreAllocateWWMRegsPass(*PR);
387   initializeSIFormMemoryClausesPass(*PR);
388   initializeSIPostRABundlerPass(*PR);
389   initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
390   initializeAMDGPUAAWrapperPassPass(*PR);
391   initializeAMDGPUExternalAAWrapperPass(*PR);
392   initializeAMDGPUUseNativeCallsPass(*PR);
393   initializeAMDGPUSimplifyLibCallsPass(*PR);
394   initializeAMDGPUPrintfRuntimeBindingPass(*PR);
395   initializeAMDGPUResourceUsageAnalysisPass(*PR);
396   initializeGCNNSAReassignPass(*PR);
397   initializeGCNPreRAOptimizationsPass(*PR);
398 }
399 
400 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
401   return std::make_unique<AMDGPUTargetObjectFile>();
402 }
403 
404 static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
405   return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
406 }
407 
408 static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
409   return new SIScheduleDAGMI(C);
410 }
411 
412 static ScheduleDAGInstrs *
413 createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
414   ScheduleDAGMILive *DAG =
415     new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
416   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
417   DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
418   DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
419   return DAG;
420 }
421 
422 static ScheduleDAGInstrs *
423 createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
424   auto DAG = new GCNIterativeScheduler(C,
425     GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
426   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
427   return DAG;
428 }
429 
430 static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
431   return new GCNIterativeScheduler(C,
432     GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
433 }
434 
435 static ScheduleDAGInstrs *
436 createIterativeILPMachineScheduler(MachineSchedContext *C) {
437   auto DAG = new GCNIterativeScheduler(C,
438     GCNIterativeScheduler::SCHEDULE_ILP);
439   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
440   DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
441   return DAG;
442 }
443 
444 static MachineSchedRegistry
445 R600SchedRegistry("r600", "Run R600's custom scheduler",
446                    createR600MachineScheduler);
447 
448 static MachineSchedRegistry
449 SISchedRegistry("si", "Run SI's custom scheduler",
450                 createSIMachineScheduler);
451 
452 static MachineSchedRegistry
453 GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
454                              "Run GCN scheduler to maximize occupancy",
455                              createGCNMaxOccupancyMachineScheduler);
456 
457 static MachineSchedRegistry
458 IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
459   "Run GCN scheduler to maximize occupancy (experimental)",
460   createIterativeGCNMaxOccupancyMachineScheduler);
461 
462 static MachineSchedRegistry
463 GCNMinRegSchedRegistry("gcn-minreg",
464   "Run GCN iterative scheduler for minimal register usage (experimental)",
465   createMinRegScheduler);
466 
467 static MachineSchedRegistry
468 GCNILPSchedRegistry("gcn-ilp",
469   "Run GCN iterative scheduler for ILP scheduling (experimental)",
470   createIterativeILPMachineScheduler);
471 
472 static StringRef computeDataLayout(const Triple &TT) {
473   if (TT.getArch() == Triple::r600) {
474     // 32-bit pointers.
475     return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
476            "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
477   }
478 
479   // 32-bit private, local, and region pointers. 64-bit global, constant, and
480   // flat pointers. Buffer fat pointers (address space 7) are non-integral.
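  // In the layout string, A5 makes allocas default to the private address
  // space (5), G1 makes globals default to the global address space (1), and
  // ni:7 marks the buffer fat pointer address space (7) as non-integral.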
481   return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
482          "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
483          "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
484          "-ni:7";
485 }
486 
487 LLVM_READNONE
488 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
489   if (!GPU.empty())
490     return GPU;
491 
492   // Need to default to a target with flat support for HSA.
493   if (TT.getArch() == Triple::amdgcn)
494     return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
495 
496   return "r600";
497 }
498 
499 static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
500   // The AMDGPU toolchain only supports generating shared objects, so we
501   // must always use PIC.
502   return Reloc::PIC_;
503 }
504 
505 AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
506                                          StringRef CPU, StringRef FS,
507                                          TargetOptions Options,
508                                          Optional<Reloc::Model> RM,
509                                          Optional<CodeModel::Model> CM,
510                                          CodeGenOpt::Level OptLevel)
511     : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
512                         FS, Options, getEffectiveRelocModel(RM),
513                         getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
514       TLOF(createTLOF(getTargetTriple())) {
515   initAsmInfo();
516   if (TT.getArch() == Triple::amdgcn) {
517     if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
518       MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
519     else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
520       MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
521   }
522 }
523 
524 bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
525 bool AMDGPUTargetMachine::EnableFunctionCalls = false;
526 bool AMDGPUTargetMachine::EnableFixedFunctionABI = false;
527 bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
528 
529 AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
530 
531 StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
532   Attribute GPUAttr = F.getFnAttribute("target-cpu");
533   return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
534 }
535 
536 StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
537   Attribute FSAttr = F.getFnAttribute("target-features");
538 
539   return FSAttr.isValid() ? FSAttr.getValueAsString()
540                           : getTargetFeatureString();
541 }
542 
543 /// Predicate for Internalize pass.
544 static bool mustPreserveGV(const GlobalValue &GV) {
545   if (const Function *F = dyn_cast<Function>(&GV))
546     return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());
547 
548   GV.removeDeadConstantUsers();
549   return !GV.use_empty();
550 }
551 
552 void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
553   Builder.DivergentTarget = true;
554 
555   bool EnableOpt = getOptLevel() > CodeGenOpt::None;
556   bool Internalize = InternalizeSymbols;
557   bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
558   bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
559   bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;
560 
561   if (EnableFunctionCalls) {
562     delete Builder.Inliner;
563     Builder.Inliner = createFunctionInliningPass();
564   }
565 
566   Builder.addExtension(
567     PassManagerBuilder::EP_ModuleOptimizerEarly,
568     [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
569                                                legacy::PassManagerBase &PM) {
570       if (AMDGPUAA) {
571         PM.add(createAMDGPUAAWrapperPass());
572         PM.add(createAMDGPUExternalAAWrapperPass());
573       }
574       PM.add(createAMDGPUUnifyMetadataPass());
575       PM.add(createAMDGPUPrintfRuntimeBinding());
576       if (Internalize)
577         PM.add(createInternalizePass(mustPreserveGV));
578       PM.add(createAMDGPUPropagateAttributesLatePass(this));
579       if (Internalize)
580         PM.add(createGlobalDCEPass());
581       if (EarlyInline)
582         PM.add(createAMDGPUAlwaysInlinePass(false));
583   });
584 
585   Builder.addExtension(
586     PassManagerBuilder::EP_EarlyAsPossible,
587     [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &,
588                                       legacy::PassManagerBase &PM) {
589       if (AMDGPUAA) {
590         PM.add(createAMDGPUAAWrapperPass());
591         PM.add(createAMDGPUExternalAAWrapperPass());
592       }
593       PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
594       PM.add(llvm::createAMDGPUUseNativeCallsPass());
595       if (LibCallSimplify)
596         PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this));
597   });
598 
599   Builder.addExtension(
600     PassManagerBuilder::EP_CGSCCOptimizerLate,
601     [EnableOpt](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
602       // Add infer address spaces pass to the opt pipeline after inlining
603       // but before SROA to increase SROA opportunities.
604       PM.add(createInferAddressSpacesPass());
605 
606       // This should run after inlining to have any chance of doing anything,
607       // and before other cleanup optimizations.
608       PM.add(createAMDGPULowerKernelAttributesPass());
609 
610       // Promote alloca to vector before SROA and loop unroll. If we manage
611       // to eliminate allocas before unroll we may choose to unroll less.
612       if (EnableOpt)
613         PM.add(createAMDGPUPromoteAllocaToVector());
614   });
615 }
616 
617 void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
618   AAM.registerFunctionAnalysis<AMDGPUAA>();
619 }
620 
621 void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
622   PB.registerPipelineParsingCallback(
623       [this](StringRef PassName, ModulePassManager &PM,
624              ArrayRef<PassBuilder::PipelineElement>) {
625         if (PassName == "amdgpu-propagate-attributes-late") {
626           PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
627           return true;
628         }
629         if (PassName == "amdgpu-unify-metadata") {
630           PM.addPass(AMDGPUUnifyMetadataPass());
631           return true;
632         }
633         if (PassName == "amdgpu-printf-runtime-binding") {
634           PM.addPass(AMDGPUPrintfRuntimeBindingPass());
635           return true;
636         }
637         if (PassName == "amdgpu-always-inline") {
638           PM.addPass(AMDGPUAlwaysInlinePass());
639           return true;
640         }
641         if (PassName == "amdgpu-replace-lds-use-with-pointer") {
642           PM.addPass(AMDGPUReplaceLDSUseWithPointerPass());
643           return true;
644         }
645         if (PassName == "amdgpu-lower-module-lds") {
646           PM.addPass(AMDGPULowerModuleLDSPass());
647           return true;
648         }
649         return false;
650       });
651   PB.registerPipelineParsingCallback(
652       [this](StringRef PassName, FunctionPassManager &PM,
653              ArrayRef<PassBuilder::PipelineElement>) {
654         if (PassName == "amdgpu-simplifylib") {
655           PM.addPass(AMDGPUSimplifyLibCallsPass(*this));
656           return true;
657         }
658         if (PassName == "amdgpu-usenative") {
659           PM.addPass(AMDGPUUseNativeCallsPass());
660           return true;
661         }
662         if (PassName == "amdgpu-promote-alloca") {
663           PM.addPass(AMDGPUPromoteAllocaPass(*this));
664           return true;
665         }
666         if (PassName == "amdgpu-promote-alloca-to-vector") {
667           PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
668           return true;
669         }
670         if (PassName == "amdgpu-lower-kernel-attributes") {
671           PM.addPass(AMDGPULowerKernelAttributesPass());
672           return true;
673         }
674         if (PassName == "amdgpu-propagate-attributes-early") {
675           PM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
676           return true;
677         }
678         return false;
679       });
680 
681   PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
682     FAM.registerPass([&] { return AMDGPUAA(); });
683   });
684 
685   PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
686     if (AAName == "amdgpu-aa") {
687       AAM.registerFunctionAnalysis<AMDGPUAA>();
688       return true;
689     }
690     return false;
691   });
692 
693   PB.registerPipelineStartEPCallback(
694       [this](ModulePassManager &PM, OptimizationLevel Level) {
695         FunctionPassManager FPM;
696         FPM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
697         FPM.addPass(AMDGPUUseNativeCallsPass());
698         if (EnableLibCallSimplify && Level != OptimizationLevel::O0)
699           FPM.addPass(AMDGPUSimplifyLibCallsPass(*this));
700         PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
701       });
702 
703   PB.registerPipelineEarlySimplificationEPCallback(
704       [this](ModulePassManager &PM, OptimizationLevel Level) {
705         if (Level == OptimizationLevel::O0)
706           return;
707 
708         PM.addPass(AMDGPUUnifyMetadataPass());
709         PM.addPass(AMDGPUPrintfRuntimeBindingPass());
710 
711         if (InternalizeSymbols) {
712           PM.addPass(InternalizePass(mustPreserveGV));
713         }
714         PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
715         if (InternalizeSymbols) {
716           PM.addPass(GlobalDCEPass());
717         }
718         if (EarlyInlineAll && !EnableFunctionCalls)
719           PM.addPass(AMDGPUAlwaysInlinePass());
720       });
721 
722   PB.registerCGSCCOptimizerLateEPCallback(
723       [this](CGSCCPassManager &PM, OptimizationLevel Level) {
724         if (Level == OptimizationLevel::O0)
725           return;
726 
727         FunctionPassManager FPM;
728 
729         // Add infer address spaces pass to the opt pipeline after inlining
730         // but before SROA to increase SROA opportunities.
731         FPM.addPass(InferAddressSpacesPass());
732 
733         // This should run after inlining to have any chance of doing
734         // anything, and before other cleanup optimizations.
735         FPM.addPass(AMDGPULowerKernelAttributesPass());
736 
737         if (Level != OptimizationLevel::O0) {
738           // Promote alloca to vector before SROA and loop unroll. If we
739           // manage to eliminate allocas before unroll we may choose to unroll
740           // less.
741           FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
742         }
743 
744         PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
745       });
746 }
747 
748 //===----------------------------------------------------------------------===//
749 // R600 Target Machine (R600 -> Cayman)
750 //===----------------------------------------------------------------------===//
751 
752 R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
753                                      StringRef CPU, StringRef FS,
754                                      TargetOptions Options,
755                                      Optional<Reloc::Model> RM,
756                                      Optional<CodeModel::Model> CM,
757                                      CodeGenOpt::Level OL, bool JIT)
758     : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
759   setRequiresStructuredCFG(true);
760 
761   // Override the default since calls aren't supported for r600.
762   if (EnableFunctionCalls &&
763       EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
764     EnableFunctionCalls = false;
765 }
766 
767 const R600Subtarget *R600TargetMachine::getSubtargetImpl(
768   const Function &F) const {
769   StringRef GPU = getGPUName(F);
770   StringRef FS = getFeatureString(F);
771 
772   SmallString<128> SubtargetKey(GPU);
773   SubtargetKey.append(FS);
774 
775   auto &I = SubtargetMap[SubtargetKey];
776   if (!I) {
777     // This needs to be done before we create a new subtarget since any
778     // creation will depend on the TM and the code generation flags on the
779     // function that reside in TargetOptions.
780     resetTargetOptions(F);
781     I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
782   }
783 
784   return I.get();
785 }
786 
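// In the local, private, and region address spaces a zero offset is a valid
// address, so their null pointer is all-ones (-1); other address spaces use 0.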
787 int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
788   return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
789           AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
790           AddrSpace == AMDGPUAS::REGION_ADDRESS)
791              ? -1
792              : 0;
793 }
794 
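// Flat, global, and constant pointers share the same representation, so casts
// between these address spaces do not need any code.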
795 bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
796                                               unsigned DestAS) const {
797   return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
798          AMDGPU::isFlatGlobalAddrSpace(DestAS);
799 }
800 
801 unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
802   const auto *LD = dyn_cast<LoadInst>(V);
803   if (!LD)
804     return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
805 
806   // It must be a load of a generic (flat) pointer.
807   assert(V->getType()->isPointerTy() &&
808          V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
809 
810   const auto *Ptr = LD->getPointerOperand();
811   if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
812     return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
813   // A generic pointer loaded from constant memory can be assumed to be a
814   // global pointer, since constant memory is only populated on the host side
815   // and, as implied by the offload programming model, only global pointers
816   // can be referenced on the host side.
817   return AMDGPUAS::GLOBAL_ADDRESS;
818 }
819 
820 TargetTransformInfo
821 R600TargetMachine::getTargetTransformInfo(const Function &F) {
822   return TargetTransformInfo(R600TTIImpl(this, F));
823 }
824 
825 //===----------------------------------------------------------------------===//
826 // GCN Target Machine (SI+)
827 //===----------------------------------------------------------------------===//
828 
829 GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
830                                    StringRef CPU, StringRef FS,
831                                    TargetOptions Options,
832                                    Optional<Reloc::Model> RM,
833                                    Optional<CodeModel::Model> CM,
834                                    CodeGenOpt::Level OL, bool JIT)
835     : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
836 
837 const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
838   StringRef GPU = getGPUName(F);
839   StringRef FS = getFeatureString(F);
840 
841   SmallString<128> SubtargetKey(GPU);
842   SubtargetKey.append(FS);
843 
844   auto &I = SubtargetMap[SubtargetKey];
845   if (!I) {
846     // This needs to be done before we create a new subtarget since any
847     // creation will depend on the TM and the code generation flags on the
848     // function that reside in TargetOptions.
849     resetTargetOptions(F);
850     I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
851   }
852 
853   I->setScalarizeGlobalBehavior(ScalarizeGlobal);
854 
855   return I.get();
856 }
857 
858 TargetTransformInfo
859 GCNTargetMachine::getTargetTransformInfo(const Function &F) {
860   return TargetTransformInfo(GCNTTIImpl(this, F));
861 }
862 
863 //===----------------------------------------------------------------------===//
864 // AMDGPU Pass Setup
865 //===----------------------------------------------------------------------===//
866 
867 namespace {
868 
869 class AMDGPUPassConfig : public TargetPassConfig {
870 public:
871   AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
872     : TargetPassConfig(TM, PM) {
873     // Exceptions and StackMaps are not supported, so these passes will never do
874     // anything.
875     disablePass(&StackMapLivenessID);
876     disablePass(&FuncletLayoutID);
877     // Garbage collection is not supported.
878     disablePass(&GCLoweringID);
879     disablePass(&ShadowStackGCLoweringID);
880   }
881 
882   AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
883     return getTM<AMDGPUTargetMachine>();
884   }
885 
886   ScheduleDAGInstrs *
887   createMachineScheduler(MachineSchedContext *C) const override {
888     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
889     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
890     return DAG;
891   }
892 
893   void addEarlyCSEOrGVNPass();
894   void addStraightLineScalarOptimizationPasses();
895   void addIRPasses() override;
896   void addCodeGenPrepare() override;
897   bool addPreISel() override;
898   bool addInstSelector() override;
899   bool addGCPasses() override;
900 
901   std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
902 
903   /// Check if a pass is enabled given \p Opt option. An explicitly used option
904   /// always overrides the default. Otherwise the option's default value is
905   /// used, provided the pass is expected to run at an optimization level of at
906   /// least \p Level.
907   bool isPassEnabled(const cl::opt<bool> &Opt,
908                      CodeGenOpt::Level Level = CodeGenOpt::Default) const {
909     if (Opt.getNumOccurrences())
910       return Opt;
911     if (TM->getOptLevel() < Level)
912       return false;
913     return Opt;
914   }
915 };
916 
917 std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
918   return getStandardCSEConfigForOpt(TM->getOptLevel());
919 }
920 
921 class R600PassConfig final : public AMDGPUPassConfig {
922 public:
923   R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
924     : AMDGPUPassConfig(TM, PM) {}
925 
926   ScheduleDAGInstrs *createMachineScheduler(
927     MachineSchedContext *C) const override {
928     return createR600MachineScheduler(C);
929   }
930 
931   bool addPreISel() override;
932   bool addInstSelector() override;
933   void addPreRegAlloc() override;
934   void addPreSched2() override;
935   void addPreEmitPass() override;
936 };
937 
938 class GCNPassConfig final : public AMDGPUPassConfig {
939 public:
940   GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
941     : AMDGPUPassConfig(TM, PM) {
942     // It is necessary to know the register usage of the entire call graph.  We
943     // allow calls without EnableAMDGPUFunctionCalls if they are marked
944     // noinline, so this is always required.
945     setRequiresCodeGenSCCOrder(true);
946   }
947 
948   GCNTargetMachine &getGCNTargetMachine() const {
949     return getTM<GCNTargetMachine>();
950   }
951 
952   ScheduleDAGInstrs *
953   createMachineScheduler(MachineSchedContext *C) const override;
954 
955   bool addPreISel() override;
956   void addMachineSSAOptimization() override;
957   bool addILPOpts() override;
958   bool addInstSelector() override;
959   bool addIRTranslator() override;
960   void addPreLegalizeMachineIR() override;
961   bool addLegalizeMachineIR() override;
962   void addPreRegBankSelect() override;
963   bool addRegBankSelect() override;
964   void addPreGlobalInstructionSelect() override;
965   bool addGlobalInstructionSelect() override;
966   void addFastRegAlloc() override;
967   void addOptimizedRegAlloc() override;
968 
969   FunctionPass *createSGPRAllocPass(bool Optimized);
970   FunctionPass *createVGPRAllocPass(bool Optimized);
971   FunctionPass *createRegAllocPass(bool Optimized) override;
972 
973   bool addRegAssignAndRewriteFast() override;
974   bool addRegAssignAndRewriteOptimized() override;
975 
976   void addPreRegAlloc() override;
977   bool addPreRewrite() override;
978   void addPostRegAlloc() override;
979   void addPreSched2() override;
980   void addPreEmitPass() override;
981 };
982 
983 } // end anonymous namespace
984 
985 void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
986   if (getOptLevel() == CodeGenOpt::Aggressive)
987     addPass(createGVNPass());
988   else
989     addPass(createEarlyCSEPass());
990 }
991 
992 void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
993   addPass(createLICMPass());
994   addPass(createSeparateConstOffsetFromGEPPass());
995   addPass(createSpeculativeExecutionPass());
996   // ReassociateGEPs exposes more opportunities for SLSR. See
997   // the example in reassociate-geps-and-slsr.ll.
998   addPass(createStraightLineStrengthReducePass());
999   // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
1000   // EarlyCSE can reuse.
1001   addEarlyCSEOrGVNPass();
1002   // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1003   addPass(createNaryReassociatePass());
1004   // NaryReassociate on GEPs creates redundant common expressions, so run
1005   // EarlyCSE after it.
1006   addPass(createEarlyCSEPass());
1007 }
1008 
1009 void AMDGPUPassConfig::addIRPasses() {
1010   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
1011 
1012   // There is no reason to run these.
1013   disablePass(&StackMapLivenessID);
1014   disablePass(&FuncletLayoutID);
1015   disablePass(&PatchableFunctionID);
1016 
1017   addPass(createAMDGPUPrintfRuntimeBinding());
1018   addPass(createAMDGPUCtorDtorLoweringPass());
1019 
1020   // This must occur before inlining, as the inliner will not look through
1021   // bitcast calls.
1022   addPass(createAMDGPUFixFunctionBitcastsPass());
1023 
1024   // Run the propagate-attributes pass in the backend in case opt was not run.
1025   addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));
1026 
1027   addPass(createAMDGPULowerIntrinsicsPass());
1028 
1029   // Function calls are not supported, so make sure we inline everything.
1030   addPass(createAMDGPUAlwaysInlinePass());
1031   addPass(createAlwaysInlinerLegacyPass());
1032   // We need to add the barrier noop pass, otherwise adding the function
1033   // inlining pass will cause all of the PassConfig's passes to be run
1034   // one function at a time, which means if we have a module with two
1035   // functions, then we will generate code for the first function
1036   // without ever running any passes on the second.
1037   addPass(createBarrierNoopPass());
1038 
1039   // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1040   if (TM.getTargetTriple().getArch() == Triple::r600)
1041     addPass(createR600OpenCLImageTypeLoweringPass());
1042 
1043   // Replace OpenCL enqueued block function pointers with global variables.
1044   addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());
1045 
1046   // This can increase the LDS used by a kernel, so it runs before PromoteAlloca.
1047   if (EnableLowerModuleLDS) {
1048     // The "amdgpu-replace-lds-use-with-pointer" pass needs to run before the
1049     // "amdgpu-lower-module-lds" pass, and it should only run when that pass is
1050     // enabled.
1051     if (EnableLDSReplaceWithPointer)
1052       addPass(createAMDGPUReplaceLDSUseWithPointerPass());
1053 
1054     addPass(createAMDGPULowerModuleLDSPass());
1055   }
1056 
1057   if (TM.getOptLevel() > CodeGenOpt::None)
1058     addPass(createInferAddressSpacesPass());
1059 
1060   addPass(createAtomicExpandPass());
1061 
1062   if (TM.getOptLevel() > CodeGenOpt::None) {
1063     addPass(createAMDGPUPromoteAlloca());
1064 
1065     if (EnableSROA)
1066       addPass(createSROAPass());
1067     if (isPassEnabled(EnableScalarIRPasses))
1068       addStraightLineScalarOptimizationPasses();
1069 
1070     if (EnableAMDGPUAliasAnalysis) {
1071       addPass(createAMDGPUAAWrapperPass());
1072       addPass(createExternalAAWrapperPass([](Pass &P, Function &,
1073                                              AAResults &AAR) {
1074         if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1075           AAR.addAAResult(WrapperPass->getResult());
1076         }));
1077     }
1078 
1079     if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
1080       // TODO: May want to move later or split into an early and late one.
1081       addPass(createAMDGPUCodeGenPreparePass());
1082     }
1083   }
1084 
1085   TargetPassConfig::addIRPasses();
1086 
1087   // EarlyCSE is not always strong enough to clean up what LSR produces. For
1088   // example, GVN can combine
1089   //
1090   //   %0 = add %a, %b
1091   //   %1 = add %b, %a
1092   //
1093   // and
1094   //
1095   //   %0 = shl nsw %a, 2
1096   //   %1 = shl %a, 2
1097   //
1098   // but EarlyCSE can do neither of them.
1099   if (isPassEnabled(EnableScalarIRPasses))
1100     addEarlyCSEOrGVNPass();
1101 }
1102 
1103 void AMDGPUPassConfig::addCodeGenPrepare() {
1104   if (TM->getTargetTriple().getArch() == Triple::amdgcn)
1105     addPass(createAMDGPUAnnotateKernelFeaturesPass());
1106 
1107   if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
1108       EnableLowerKernelArguments)
1109     addPass(createAMDGPULowerKernelArgumentsPass());
1110 
1111   TargetPassConfig::addCodeGenPrepare();
1112 
1113   if (isPassEnabled(EnableLoadStoreVectorizer))
1114     addPass(createLoadStoreVectorizerPass());
1115 
1116   // The LowerSwitch pass may introduce unreachable blocks that can cause
1117   // unexpected behavior for subsequent passes. Placing it here means those
1118   // blocks get cleaned up by the UnreachableBlockElim pass inserted next in
1119   // the pass flow.
1120   addPass(createLowerSwitchPass());
1121 }
1122 
1123 bool AMDGPUPassConfig::addPreISel() {
1124   if (TM->getOptLevel() > CodeGenOpt::None)
1125     addPass(createFlattenCFGPass());
1126   return false;
1127 }
1128 
1129 bool AMDGPUPassConfig::addInstSelector() {
1130   // Defer the verifier until FinalizeISel.
1131   addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
1132   return false;
1133 }
1134 
1135 bool AMDGPUPassConfig::addGCPasses() {
1136   // Do nothing. GC is not supported.
1137   return false;
1138 }
1139 
1140 //===----------------------------------------------------------------------===//
1141 // R600 Pass Setup
1142 //===----------------------------------------------------------------------===//
1143 
1144 bool R600PassConfig::addPreISel() {
1145   AMDGPUPassConfig::addPreISel();
1146 
1147   if (EnableR600StructurizeCFG)
1148     addPass(createStructurizeCFGPass());
1149   return false;
1150 }
1151 
1152 bool R600PassConfig::addInstSelector() {
1153   addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
1154   return false;
1155 }
1156 
1157 void R600PassConfig::addPreRegAlloc() {
1158   addPass(createR600VectorRegMerger());
1159 }
1160 
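// The second argument to addPass disables the machine verifier after these
// passes; see the "Defer the verifier" note in addInstSelector above.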
1161 void R600PassConfig::addPreSched2() {
1162   addPass(createR600EmitClauseMarkers(), false);
1163   if (EnableR600IfConvert)
1164     addPass(&IfConverterID, false);
1165   addPass(createR600ClauseMergePass(), false);
1166 }
1167 
1168 void R600PassConfig::addPreEmitPass() {
1169   addPass(createAMDGPUCFGStructurizerPass(), false);
1170   addPass(createR600ExpandSpecialInstrsPass(), false);
1171   addPass(&FinalizeMachineBundlesID, false);
1172   addPass(createR600Packetizer(), false);
1173   addPass(createR600ControlFlowFinalizer(), false);
1174 }
1175 
1176 TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
1177   return new R600PassConfig(*this, PM);
1178 }
1179 
1180 //===----------------------------------------------------------------------===//
1181 // GCN Pass Setup
1182 //===----------------------------------------------------------------------===//
1183 
1184 ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1185   MachineSchedContext *C) const {
1186   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1187   if (ST.enableSIScheduler())
1188     return createSIMachineScheduler(C);
1189   return createGCNMaxOccupancyMachineScheduler(C);
1190 }
1191 
1192 bool GCNPassConfig::addPreISel() {
1193   AMDGPUPassConfig::addPreISel();
1194 
1195   if (TM->getOptLevel() > CodeGenOpt::None)
1196     addPass(createAMDGPULateCodeGenPreparePass());
1197 
1198   if (isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less)) {
1199     addPass(createAMDGPUAtomicOptimizerPass());
1200   }
1201 
1202   if (TM->getOptLevel() > CodeGenOpt::None)
1203     addPass(createSinkingPass());
1204 
1205   // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1206   // regions formed by them.
1207   addPass(&AMDGPUUnifyDivergentExitNodesID);
1208   if (!LateCFGStructurize) {
1209     if (EnableStructurizerWorkarounds) {
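      // FixIrreducible turns irreducible control flow into natural loops and
      // UnifyLoopExits gives each loop a single exit block, which avoids cases
      // that StructurizeCFG does not handle well.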
1210       addPass(createFixIrreduciblePass());
1211       addPass(createUnifyLoopExitsPass());
1212     }
1213     addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1214   }
1215   addPass(createAMDGPUAnnotateUniformValues());
1216   if (!LateCFGStructurize) {
1217     addPass(createSIAnnotateControlFlowPass());
1218   }
1219   addPass(createLCSSAPass());
1220 
1221   if (TM->getOptLevel() > CodeGenOpt::Less)
1222     addPass(&AMDGPUPerfHintAnalysisID);
1223 
1224   return false;
1225 }
1226 
1227 void GCNPassConfig::addMachineSSAOptimization() {
1228   TargetPassConfig::addMachineSSAOptimization();
1229 
1230   // We want to fold operands after PeepholeOptimizer has run (or as part of
1231   // it), because it will eliminate extra copies making it easier to fold the
1232   // real source operand. We want to eliminate dead instructions after, so that
1233   // we see fewer uses of the copies. We then need to clean up the dead
1234   // instructions leftover after the operands are folded as well.
1235   //
1236   // XXX - Can we get away without running DeadMachineInstructionElim again?
1237   addPass(&SIFoldOperandsID);
1238   if (EnableDPPCombine)
1239     addPass(&GCNDPPCombineID);
1240   addPass(&SILoadStoreOptimizerID);
1241   if (isPassEnabled(EnableSDWAPeephole)) {
1242     addPass(&SIPeepholeSDWAID);
1243     addPass(&EarlyMachineLICMID);
1244     addPass(&MachineCSEID);
1245     addPass(&SIFoldOperandsID);
1246   }
1247   addPass(&DeadMachineInstructionElimID);
1248   addPass(createSIShrinkInstructionsPass());
1249 }
1250 
1251 bool GCNPassConfig::addILPOpts() {
1252   if (EnableEarlyIfConversion)
1253     addPass(&EarlyIfConverterID);
1254 
1255   TargetPassConfig::addILPOpts();
1256   return false;
1257 }
1258 
1259 bool GCNPassConfig::addInstSelector() {
1260   AMDGPUPassConfig::addInstSelector();
1261   addPass(&SIFixSGPRCopiesID);
1262   addPass(createSILowerI1CopiesPass());
1263   return false;
1264 }
1265 
1266 bool GCNPassConfig::addIRTranslator() {
1267   addPass(new IRTranslator(getOptLevel()));
1268   return false;
1269 }
1270 
1271 void GCNPassConfig::addPreLegalizeMachineIR() {
1272   bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1273   addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1274   addPass(new Localizer());
1275 }
1276 
1277 bool GCNPassConfig::addLegalizeMachineIR() {
1278   addPass(new Legalizer());
1279   return false;
1280 }
1281 
1282 void GCNPassConfig::addPreRegBankSelect() {
1283   bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1284   addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1285 }
1286 
1287 bool GCNPassConfig::addRegBankSelect() {
1288   addPass(new RegBankSelect());
1289   return false;
1290 }
1291 
1292 void GCNPassConfig::addPreGlobalInstructionSelect() {
1293   bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1294   addPass(createAMDGPURegBankCombiner(IsOptNone));
1295 }
1296 
1297 bool GCNPassConfig::addGlobalInstructionSelect() {
1298   addPass(new InstructionSelect(getOptLevel()));
1299   return false;
1300 }
1301 
1302 void GCNPassConfig::addPreRegAlloc() {
1303   if (LateCFGStructurize) {
1304     addPass(createAMDGPUMachineCFGStructurizerPass());
1305   }
1306 }
1307 
1308 void GCNPassConfig::addFastRegAlloc() {
1309   // FIXME: We have to disable the verifier here because of PHIElimination +
1310   // TwoAddressInstructions disabling it.
1311 
1312   // This must be run immediately after phi elimination and before
1313   // TwoAddressInstructions, otherwise the processing of the tied operand of
1314   // SI_ELSE will introduce a copy of the tied operand source after the else.
1315   insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
1316 
1317   insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1318   insertPass(&TwoAddressInstructionPassID, &SIPreAllocateWWMRegsID);
1319 
1320   TargetPassConfig::addFastRegAlloc();
1321 }
1322 
1323 void GCNPassConfig::addOptimizedRegAlloc() {
1324   // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1325   // instructions that cause scheduling barriers.
1326   insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
1327   insertPass(&MachineSchedulerID, &SIPreAllocateWWMRegsID);
1328 
1329   if (OptExecMaskPreRA)
1330     insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
1331 
1332   if (isPassEnabled(EnablePreRAOptimizations))
1333     insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);
1334 
1335   // This is not an essential optimization and it has a noticeable impact on
1336   // compilation time, so we only enable it from O2.
1337   if (TM->getOptLevel() > CodeGenOpt::Less)
1338     insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
1339 
1340   // FIXME: When an instruction has a killed operand and the instruction is
1341   // inside a bundle, it seems only the BUNDLE instruction appears as the kill
1342   // of the register in LiveVariables. This triggers a verifier failure, so we
1343   // should fix it and enable the verifier.
1344   if (OptVGPRLiveRange)
1345     insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID, false);
1346   // This must be run immediately after phi elimination and before
1347   // TwoAddressInstructions, otherwise the processing of the tied operand of
1348   // SI_ELSE will introduce a copy of the tied operand source after the else.
1349   insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
1350 
1351   if (EnableDCEInRA)
1352     insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1353 
1354   TargetPassConfig::addOptimizedRegAlloc();
1355 }
1356 
1357 bool GCNPassConfig::addPreRewrite() {
1358   if (EnableRegReassign)
1359     addPass(&GCNNSAReassignID);
1360   return true;
1361 }
1362 
1363 FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1364   // Initialize the global default.
1365   llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1366                   initializeDefaultSGPRRegisterAllocatorOnce);
1367 
1368   RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1369   if (Ctor != useDefaultRegisterAllocator)
1370     return Ctor();
1371 
1372   if (Optimized)
1373     return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1374 
1375   return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1376 }
1377 
1378 FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1379   // Initialize the global default.
1380   llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1381                   initializeDefaultVGPRRegisterAllocatorOnce);
1382 
1383   RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1384   if (Ctor != useDefaultRegisterAllocator)
1385     return Ctor();
1386 
1387   if (Optimized)
1388     return createGreedyVGPRRegisterAllocator();
1389 
1390   return createFastVGPRRegisterAllocator();
1391 }
1392 
1393 FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1394   llvm_unreachable("should not be used");
1395 }
1396 
1397 static const char RegAllocOptNotSupportedMessage[] =
1398   "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";
1399 
1400 bool GCNPassConfig::addRegAssignAndRewriteFast() {
1401   if (!usingDefaultRegAlloc())
1402     report_fatal_error(RegAllocOptNotSupportedMessage);
1403 
1404   addPass(createSGPRAllocPass(false));
1405 
1406   // Equivalent of PEI for SGPRs.
1407   addPass(&SILowerSGPRSpillsID);
1408 
1409   addPass(createVGPRAllocPass(false));
1410   return true;
1411 }
1412 
1413 bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1414   if (!usingDefaultRegAlloc())
1415     report_fatal_error(RegAllocOptNotSupportedMessage);
1416 
1417   addPass(createSGPRAllocPass(true));
1418 
1419   // Commit allocated register changes. This is mostly necessary because too
1420   // many things rely on the use lists of the physical registers, such as the
1421   // verifier. This is only necessary with allocators which use LiveIntervals,
1422   // since FastRegAlloc does the replacements itself.
1423   addPass(createVirtRegRewriter(false));
1424 
1425   // Equivalent of PEI for SGPRs.
1426   addPass(&SILowerSGPRSpillsID);
1427 
1428   addPass(createVGPRAllocPass(true));
1429 
1430   addPreRewrite();
1431   addPass(&VirtRegRewriterID);
1432 
1433   return true;
1434 }
1435 
1436 void GCNPassConfig::addPostRegAlloc() {
1437   addPass(&SIFixVGPRCopiesID);
1438   if (getOptLevel() > CodeGenOpt::None)
1439     addPass(&SIOptimizeExecMaskingID);
1440   TargetPassConfig::addPostRegAlloc();
1441 }
1442 
1443 void GCNPassConfig::addPreSched2() {
1444   addPass(&SIPostRABundlerID);
1445 }
1446 
1447 void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  if (TM->getOptLevel() > CodeGenOpt::None)
    addPass(createSIShrinkInstructionsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-RA scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because, if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom-up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

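// MIR (de)serialization support for SIMachineFunctionInfo.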
yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fix up the subtarget-dependent default value.
    const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

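  // Resolve a named register reference from the YAML, pointing any parse error
  // at the register's source range.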
  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

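  // These registers default to placeholder values; only verify the register
  // class if an actual register was serialized.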
  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

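  // Parse an optional argument from the YAML, verify that an explicit register
  // is in the expected class, and account for the user/system SGPRs it uses.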
  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

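  // Copy the floating point mode settings parsed from the YAML.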
  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals;
  MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals;
  MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals;
  MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals;

  return false;
}