//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUExportClustering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
    "r600-ir-structurize",
    cl::desc("Use StructurizeCFG IR pass"),
    cl::init(true));

static cl::opt<bool> EnableSROA(
    "amdgpu-sroa",
    cl::desc("Run SROA after promote alloca pass"),
    cl::ReallyHidden,
    cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
    "r600-if-convert",
    cl::desc("Use if conversion pass"),
    cl::ReallyHidden,
    cl::init(true));

// Option to disable the vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
    "amdgpu-load-store-vectorizer",
    cl::desc("Enable load store vectorizer"),
    cl::init(true),
    cl::Hidden);

// Option to control scalarization of global loads.
static cl::opt<bool> ScalarizeGlobal(
    "amdgpu-scalarize-global-loads",
    cl::desc("Enable global load scalarization"),
    cl::init(true),
    cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
    "amdgpu-internalize-symbols",
    cl::desc("Enable elimination of non-kernel functions and unused globals"),
    cl::init(false),
    cl::Hidden);

// Option to inline all functions early.
static cl::opt<bool> EarlyInlineAll(
    "amdgpu-early-inline-all",
    cl::desc("Inline all functions early"),
    cl::init(false),
    cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
    "amdgpu-sdwa-peephole",
    cl::desc("Enable SDWA peepholer"),
    cl::init(true));

static cl::opt<bool> EnableDPPCombine(
    "amdgpu-dpp-combine",
    cl::desc("Enable DPP combiner"),
    cl::init(true));

// Enable address-space-based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
    cl::desc("Enable AMDGPU Alias Analysis"),
    cl::init(true));

// Option to run the late CFG structurizer.
static cl::opt<bool, true> LateCFGStructurize(
    "amdgpu-late-structurize",
    cl::desc("Enable late CFG structurization"),
    cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
    cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
    "amdgpu-function-calls",
    cl::desc("Enable AMDGPU function call support"),
    cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt(
    "amdgpu-fixed-function-abi",
    cl::desc("Enable all implicit function arguments"),
    cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI),
    cl::init(false),
    cl::Hidden);

// Enable library call simplifications.
static cl::opt<bool> EnableLibCallSimplify(
    "amdgpu-simplify-libcall",
    cl::desc("Enable amdgpu library simplifications"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
    "amdgpu-ir-lower-kernel-arguments",
    cl::desc("Lower kernel argument loads in IR pass"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableRegReassign(
    "amdgpu-reassign-regs",
    cl::desc("Enable register reassign optimizations on gfx10+"),
    cl::init(true),
    cl::Hidden);

// Enable atomic optimization.
static cl::opt<bool> EnableAtomicOptimizations(
    "amdgpu-atomic-optimizations",
    cl::desc("Enable atomic optimizations"),
    cl::init(false),
    cl::Hidden);

// Enable Mode register optimization.
static cl::opt<bool> EnableSIModeRegisterPass(
    "amdgpu-mode-register",
    cl::desc("Enable mode register pass"),
    cl::init(true),
    cl::Hidden);

// Option used in lit tests to prevent dead-code elimination of the patterns
// being inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
    "amdgpu-scalar-ir-passes",
    cl::desc("Enable scalar IR passes"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
    cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target.
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPostLegalizerCombinerPass(*PR);
  initializeAMDGPUPreLegalizerCombinerPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUPromoteAllocaToVectorPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertHardClausesPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIRemoveShortExecBranchesPass(*PR);
  initializeSIPreEmitPeepholePass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeSIPostRABundlerPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
  initializeSIAddIMGInitPass(*PR);
}
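
// For example (illustrative), the boolean options defined above can be
// toggled from llc even when marked cl::Hidden (hidden options are only
// omitted from -help output, not disabled):
//
//   llc -mtriple=amdgcn-amd-amdhsa -amdgpu-sdwa-peephole=0 \
//       -amdgpu-dpp-combine=0 kernel.ll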

static
std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
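
// Illustrative: the registries above make these strategies selectable through
// the generic -misched option, e.g. -misched=gcn-ilp or
// -misched=gcn-max-occupancy, which takes precedence over the target's
// createMachineScheduler hook below.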

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat pointers; buffer fat pointers are non-integral.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}
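
// A brief decoding of the GCN string above (per the LLVM data layout
// reference): "p:64:64" gives 64-bit generic (flat) pointers, "p5:32:32"
// gives address space 5 (private) 32-bit pointers, "A5" makes allocas
// default to address space 5, "n32:64" lists the native integer widths, and
// "ni:7" marks address space 7 (buffer fat pointers) as non-integral.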

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableFixedFunctionABI = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}
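
// Illustrative IR for the two lookups above (attribute spellings as emitted
// by clang; the CPU and feature names are examples):
//
//   define amdgpu_kernel void @f() #0 { ... }
//   attributes #0 = { "target-cpu"="gfx900"
//                     "target-features"="+wavefrontsize64" }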

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      if (Internalize)
        PM.add(createInternalizePass(mustPreserveGV));
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize)
        PM.add(createGlobalDCEPass());
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [EnableOpt](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());

      // Promote alloca to vector before SROA and loop unroll. If we manage
      // to eliminate allocas before unroll we may choose to unroll less.
      if (EnableOpt)
        PM.add(createAMDGPUPromoteAllocaToVector());
  });

  Builder.addExtension(
    PassManagerBuilder::EP_LoopOptimizerEnd,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add SROA after loop unrolling as more promotable patterns are
      // exposed after small loops are fully unrolled.
      PM.add(createSROAPass());
  });
}
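
// Note: these extension points only fire when the legacy PassManagerBuilder
// assembles an optimization pipeline (e.g. clang or "opt -O2" built against
// this target); plain codegen runs through llc do not invoke
// adjustPassManager.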

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
    const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function, which reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function, which reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}
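
// Illustrative: subtargets are cached per CPU+feature-string key, so two
// functions compiled with "target-cpu"="gfx1010" but different
// "target-features" strings each get their own GCNSubtarget, while identical
// attribute sets share one instance.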

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
      MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph.
    // We allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
  // or EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}
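
// Illustrative sketch of what the straight-line passes above buy us:
// SeparateConstOffsetFromGEP can split &A[i + 1] into &A[i] plus a constant
// offset, SLSR then rewrites related address computations such as
// (b + i * s) and (b + (i + 1) * s) in terms of one another, and the
// EarlyCSE/GVN run cleans up the common subexpressions this exposes.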

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // Run the propagate-attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here means those
  // blocks get cleaned up by UnreachableBlockElim, which is inserted next in
  // the pass flow.
  addPass(createLowerSwitchPass());
}
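
// Illustrative: given two adjacent global loads such as
//
//   %a = load float, float addrspace(1)* %p
//   %b = load float, float addrspace(1)* %q   ; %q == %p + 1
//
// the LoadStoreVectorizer added above can merge them into a single
// <2 x float> load, which later maps to a wider dword memory instruction.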

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()),
          false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}
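
// Illustrative control-flow note for addPreISel above: StructurizeCFG
// operates on single-entry/single-exit regions, so a kernel with two
// divergent returns is first given one common exit block by
// AMDGPUUnifyDivergentExitNodes before structurization runs.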

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}
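
// Illustrative: TargetPassConfig::insertPass(&AnchorID, &PassID) schedules
// PassID to run immediately after the anchor pass, so the calls above pin
// SILowerControlFlow right after PHI elimination and SIPreAllocateWWMRegs
// right after register coalescing within the regalloc pipeline.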

void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
  insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-RA scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions as
  // needed. It would be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SIRemoveShortExecBranchesID);
  addPass(&SIInsertSkipsPassID);
  addPass(&SIPreEmitPeepholeID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}
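
// Illustrative MIR input for the parser below (field spellings assumed to
// match yaml::SIMachineFunctionInfo's serialization):
//
//   machineFunctionInfo:
//     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
//     frameOffsetReg:    '$sgpr33'
//     stackPtrOffsetReg: '$sgpr32'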

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }
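
  // Illustrative: a MIR input that names, say, scratchRSrcReg:
  // '$vgpr0_vgpr1_vgpr2_vgpr3' passes register parsing but fails the
  // SGPR_128 class test above, producing the "incorrect register class for
  // field" diagnostic.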

  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals;
  MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals;
  MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals;
  MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals;

  return false;
}