//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware-specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUExportClustering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
            cl::desc("Run pre-RA exec mask optimizations"),
            cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);
// Option to control global load scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

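// The next three options use external storage (cl::opt<bool, true> together
// with cl::location): their values are written into static members of
// AMDGPUTargetMachine, so the rest of the backend can read them directly
// without going through the command-line machinery.
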
// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt(
  "amdgpu-fixed-function-abi",
  cl::desc("Enable all implicit function arguments"),
  cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI),
  cl::init(false),
  cl::Hidden);

// Enable lib call simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Option used in lit tests to prevent dead code elimination of the patterns
// being inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
    cl::Hidden);
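
// All of the options above are ordinary cl::opt flags, so they can be toggled
// from the command line, e.g. (illustrative invocations):
//   llc -mtriple=amdgcn -amdgpu-sroa=0 -amdgpu-sdwa-peephole=0 foo.ll
//   clang ... -mllvm -amdgpu-early-inline-all=1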

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

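  // Register every AMDGPU pass with the global PassRegistry up front so that
  // pass-name-based tooling (e.g. -run-pass, -stop-after, -print-after) can
  // look them up by name.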
  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPostLegalizerCombinerPass(*PR);
  initializeAMDGPUPreLegalizerCombinerPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUPromoteAllocaToVectorPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPULateCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertHardClausesPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIRemoveShortExecBranchesPass(*PR);
  initializeSIPreEmitPeepholePass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeSIPostRABundlerPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
  initializeSIAddIMGInitPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

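// DAG mutations post-process the scheduling DAG after it is built: here they
// cluster adjacent loads, keep macro-fused instruction pairs back to back,
// and cluster export instructions.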
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
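// Registering with MachineSchedRegistry makes these strategies selectable by
// name through the generic machine scheduler option, e.g. (illustrative):
//   llc -mtriple=amdgcn -misched=gcn-ilp foo.ll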

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant, and
  // flat pointers; buffer fat pointers are non-integral.
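  // (Address spaces: 0 = flat, 1 = global, 2 = region, 3 = local,
  // 4 = constant, 5 = private, 6 = 32-bit constant, 7 = buffer fat pointer.
  // "A5" makes allocas default to the private address space and "ni:7" marks
  // address space 7 as non-integral.)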
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
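  // The DWARF register numbering differs between wave32 and wave64, so select
  // the MC register info that matches the subtarget's wavefront size.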
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableFixedFunctionABI = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
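/// Keep declarations and entry points (kernels) externally visible; everything
/// else may be internalized and subsequently removed by GlobalDCE.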
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      if (Internalize)
        PM.add(createInternalizePass(mustPreserveGV));
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize)
        PM.add(createGlobalDCEPass());
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [EnableOpt](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());

      // Promote alloca to vector before SROA and loop unroll. If we manage
      // to eliminate allocas before unroll we may choose to unroll less.
      if (EnableOpt)
        PM.add(createAMDGPUPromoteAllocaToVector());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

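  // Subtarget instances are cached per (GPU, feature string) key, so functions
  // with identical target attributes share a single subtarget.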
  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

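// Flat, global, and constant pointers share the same 64-bit hardware
// representation, so casting among these address spaces is free.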
bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph.  We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // Propagate attributes in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here ensures those
  // blocks get cleaned up by UnreachableBlockElim, which is inserted next in
  // the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  addPass(createAMDGPULateCodeGenPreparePass());
  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
  insertPass(&TwoAddressInstructionPassID, &SIPreAllocateWWMRegsID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
  insertPass(&MachineSchedulerID, &SIPreAllocateWWMRegsID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
  insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SIRemoveShortExecBranchesID);
  addPass(&SIInsertSkipsPassID);
  addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler is not
  // guaranteed to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

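// MIR serialization hooks: convert the target-specific SIMachineFunctionInfo
// to and from its YAML form so that machine function state survives MIR
// round trips (e.g. llc -stop-after followed by -run-pass).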
yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

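  // Parse one optional argument descriptor from the YAML, verify that a
  // register argument is in the expected register class, and account for the
  // user/system SGPRs the argument consumes.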
  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals;
  MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals;
  MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals;
  MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals;

  return false;
}