1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// The AMDGPU target machine contains all of the hardware specific 11 /// information needed to emit code for R600 and SI GPUs. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "AMDGPUTargetMachine.h" 16 #include "AMDGPU.h" 17 #include "AMDGPUAliasAnalysis.h" 18 #include "AMDGPUCallLowering.h" 19 #include "AMDGPUInstructionSelector.h" 20 #include "AMDGPULegalizerInfo.h" 21 #include "AMDGPUMacroFusion.h" 22 #include "AMDGPUTargetObjectFile.h" 23 #include "AMDGPUTargetTransformInfo.h" 24 #include "GCNIterativeScheduler.h" 25 #include "GCNSchedStrategy.h" 26 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 27 #include "R600MachineScheduler.h" 28 #include "SIMachineFunctionInfo.h" 29 #include "SIMachineScheduler.h" 30 #include "TargetInfo/AMDGPUTargetInfo.h" 31 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 32 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h" 33 #include "llvm/CodeGen/GlobalISel/Legalizer.h" 34 #include "llvm/CodeGen/GlobalISel/Localizer.h" 35 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h" 36 #include "llvm/CodeGen/MIRParser/MIParser.h" 37 #include "llvm/CodeGen/Passes.h" 38 #include "llvm/CodeGen/TargetPassConfig.h" 39 #include "llvm/IR/Attributes.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/LegacyPassManager.h" 42 #include "llvm/InitializePasses.h" 43 #include "llvm/Pass.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Support/Compiler.h" 46 #include "llvm/Support/TargetRegistry.h" 47 #include "llvm/Target/TargetLoweringObjectFile.h" 48 #include "llvm/Transforms/IPO.h" 49 #include "llvm/Transforms/IPO/AlwaysInliner.h" 50 #include "llvm/Transforms/IPO/PassManagerBuilder.h" 51 #include "llvm/Transforms/Scalar.h" 52 #include "llvm/Transforms/Scalar/GVN.h" 53 #include "llvm/Transforms/Utils.h" 54 #include "llvm/Transforms/Vectorize.h" 55 #include <memory> 56 57 using namespace llvm; 58 59 static cl::opt<bool> EnableR600StructurizeCFG( 60 "r600-ir-structurize", 61 cl::desc("Use StructurizeCFG IR pass"), 62 cl::init(true)); 63 64 static cl::opt<bool> EnableSROA( 65 "amdgpu-sroa", 66 cl::desc("Run SROA after promote alloca pass"), 67 cl::ReallyHidden, 68 cl::init(true)); 69 70 static cl::opt<bool> 71 EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, 72 cl::desc("Run early if-conversion"), 73 cl::init(false)); 74 75 static cl::opt<bool> 76 OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, 77 cl::desc("Run pre-RA exec mask optimizations"), 78 cl::init(true)); 79 80 static cl::opt<bool> EnableR600IfConvert( 81 "r600-if-convert", 82 cl::desc("Use if conversion pass"), 83 cl::ReallyHidden, 84 cl::init(true)); 85 86 // Option to disable vectorizer for tests. 87 static cl::opt<bool> EnableLoadStoreVectorizer( 88 "amdgpu-load-store-vectorizer", 89 cl::desc("Enable load store vectorizer"), 90 cl::init(true), 91 cl::Hidden); 92 93 // Option to control global loads scalarization 94 static cl::opt<bool> ScalarizeGlobal( 95 "amdgpu-scalarize-global-loads", 96 cl::desc("Enable global load scalarization"), 97 cl::init(true), 98 cl::Hidden); 99 100 // Option to run internalize pass. 101 static cl::opt<bool> InternalizeSymbols( 102 "amdgpu-internalize-symbols", 103 cl::desc("Enable elimination of non-kernel functions and unused globals"), 104 cl::init(false), 105 cl::Hidden); 106 107 // Option to inline all early. 108 static cl::opt<bool> EarlyInlineAll( 109 "amdgpu-early-inline-all", 110 cl::desc("Inline all functions early"), 111 cl::init(false), 112 cl::Hidden); 113 114 static cl::opt<bool> EnableSDWAPeephole( 115 "amdgpu-sdwa-peephole", 116 cl::desc("Enable SDWA peepholer"), 117 cl::init(true)); 118 119 static cl::opt<bool> EnableDPPCombine( 120 "amdgpu-dpp-combine", 121 cl::desc("Enable DPP combiner"), 122 cl::init(true)); 123 124 // Enable address space based alias analysis 125 static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, 126 cl::desc("Enable AMDGPU Alias Analysis"), 127 cl::init(true)); 128 129 // Option to run late CFG structurizer 130 static cl::opt<bool, true> LateCFGStructurize( 131 "amdgpu-late-structurize", 132 cl::desc("Enable late CFG structurization"), 133 cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), 134 cl::Hidden); 135 136 static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt( 137 "amdgpu-function-calls", 138 cl::desc("Enable AMDGPU function call support"), 139 cl::location(AMDGPUTargetMachine::EnableFunctionCalls), 140 cl::init(true), 141 cl::Hidden); 142 143 static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt( 144 "amdgpu-fixed-function-abi", 145 cl::desc("Enable all implicit function arguments"), 146 cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI), 147 cl::init(false), 148 cl::Hidden); 149 150 // Enable lib calls simplifications 151 static cl::opt<bool> EnableLibCallSimplify( 152 "amdgpu-simplify-libcall", 153 cl::desc("Enable amdgpu library simplifications"), 154 cl::init(true), 155 cl::Hidden); 156 157 static cl::opt<bool> EnableLowerKernelArguments( 158 "amdgpu-ir-lower-kernel-arguments", 159 cl::desc("Lower kernel argument loads in IR pass"), 160 cl::init(true), 161 cl::Hidden); 162 163 static cl::opt<bool> EnableRegReassign( 164 "amdgpu-reassign-regs", 165 cl::desc("Enable register reassign optimizations on gfx10+"), 166 cl::init(true), 167 cl::Hidden); 168 169 // Enable atomic optimization 170 static cl::opt<bool> EnableAtomicOptimizations( 171 "amdgpu-atomic-optimizations", 172 cl::desc("Enable atomic optimizations"), 173 cl::init(false), 174 cl::Hidden); 175 176 // Enable Mode register optimization 177 static cl::opt<bool> EnableSIModeRegisterPass( 178 "amdgpu-mode-register", 179 cl::desc("Enable mode register pass"), 180 cl::init(true), 181 cl::Hidden); 182 183 // Option is used in lit tests to prevent deadcoding of patterns inspected. 184 static cl::opt<bool> 185 EnableDCEInRA("amdgpu-dce-in-ra", 186 cl::init(true), cl::Hidden, 187 cl::desc("Enable machine DCE inside regalloc")); 188 189 static cl::opt<bool> EnableScalarIRPasses( 190 "amdgpu-scalar-ir-passes", 191 cl::desc("Enable scalar IR passes"), 192 cl::init(true), 193 cl::Hidden); 194 195 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() { 196 // Register the target 197 RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget()); 198 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget()); 199 200 PassRegistry *PR = PassRegistry::getPassRegistry(); 201 initializeR600ClauseMergePassPass(*PR); 202 initializeR600ControlFlowFinalizerPass(*PR); 203 initializeR600PacketizerPass(*PR); 204 initializeR600ExpandSpecialInstrsPassPass(*PR); 205 initializeR600VectorRegMergerPass(*PR); 206 initializeGlobalISel(*PR); 207 initializeAMDGPUDAGToDAGISelPass(*PR); 208 initializeGCNDPPCombinePass(*PR); 209 initializeSILowerI1CopiesPass(*PR); 210 initializeSILowerSGPRSpillsPass(*PR); 211 initializeSIFixSGPRCopiesPass(*PR); 212 initializeSIFixVGPRCopiesPass(*PR); 213 initializeSIFixupVectorISelPass(*PR); 214 initializeSIFoldOperandsPass(*PR); 215 initializeSIPeepholeSDWAPass(*PR); 216 initializeSIShrinkInstructionsPass(*PR); 217 initializeSIOptimizeExecMaskingPreRAPass(*PR); 218 initializeSILoadStoreOptimizerPass(*PR); 219 initializeAMDGPUFixFunctionBitcastsPass(*PR); 220 initializeAMDGPUAlwaysInlinePass(*PR); 221 initializeAMDGPUAnnotateKernelFeaturesPass(*PR); 222 initializeAMDGPUAnnotateUniformValuesPass(*PR); 223 initializeAMDGPUArgumentUsageInfoPass(*PR); 224 initializeAMDGPUAtomicOptimizerPass(*PR); 225 initializeAMDGPULowerKernelArgumentsPass(*PR); 226 initializeAMDGPULowerKernelAttributesPass(*PR); 227 initializeAMDGPULowerIntrinsicsPass(*PR); 228 initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR); 229 initializeAMDGPUPostLegalizerCombinerPass(*PR); 230 initializeAMDGPUPreLegalizerCombinerPass(*PR); 231 initializeAMDGPUPromoteAllocaPass(*PR); 232 initializeAMDGPUCodeGenPreparePass(*PR); 233 initializeAMDGPUPropagateAttributesEarlyPass(*PR); 234 initializeAMDGPUPropagateAttributesLatePass(*PR); 235 initializeAMDGPURewriteOutArgumentsPass(*PR); 236 initializeAMDGPUUnifyMetadataPass(*PR); 237 initializeSIAnnotateControlFlowPass(*PR); 238 initializeSIInsertWaitcntsPass(*PR); 239 initializeSIModeRegisterPass(*PR); 240 initializeSIWholeQuadModePass(*PR); 241 initializeSILowerControlFlowPass(*PR); 242 initializeSIRemoveShortExecBranchesPass(*PR); 243 initializeSIInsertSkipsPass(*PR); 244 initializeSIMemoryLegalizerPass(*PR); 245 initializeSIOptimizeExecMaskingPass(*PR); 246 initializeSIPreAllocateWWMRegsPass(*PR); 247 initializeSIFormMemoryClausesPass(*PR); 248 initializeSIPostRABundlerPass(*PR); 249 initializeAMDGPUUnifyDivergentExitNodesPass(*PR); 250 initializeAMDGPUAAWrapperPassPass(*PR); 251 initializeAMDGPUExternalAAWrapperPass(*PR); 252 initializeAMDGPUUseNativeCallsPass(*PR); 253 initializeAMDGPUSimplifyLibCallsPass(*PR); 254 initializeAMDGPUInlinerPass(*PR); 255 initializeAMDGPUPrintfRuntimeBindingPass(*PR); 256 initializeGCNRegBankReassignPass(*PR); 257 initializeGCNNSAReassignPass(*PR); 258 initializeSIAddIMGInitPass(*PR); 259 } 260 261 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) { 262 return std::make_unique<AMDGPUTargetObjectFile>(); 263 } 264 265 static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) { 266 return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>()); 267 } 268 269 static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) { 270 return new SIScheduleDAGMI(C); 271 } 272 273 static ScheduleDAGInstrs * 274 createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) { 275 ScheduleDAGMILive *DAG = 276 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C)); 277 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 278 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 279 DAG->addMutation(createAMDGPUMacroFusionDAGMutation()); 280 return DAG; 281 } 282 283 static ScheduleDAGInstrs * 284 createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) { 285 auto DAG = new GCNIterativeScheduler(C, 286 GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY); 287 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 288 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 289 return DAG; 290 } 291 292 static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) { 293 return new GCNIterativeScheduler(C, 294 GCNIterativeScheduler::SCHEDULE_MINREGFORCED); 295 } 296 297 static ScheduleDAGInstrs * 298 createIterativeILPMachineScheduler(MachineSchedContext *C) { 299 auto DAG = new GCNIterativeScheduler(C, 300 GCNIterativeScheduler::SCHEDULE_ILP); 301 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 302 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 303 DAG->addMutation(createAMDGPUMacroFusionDAGMutation()); 304 return DAG; 305 } 306 307 static MachineSchedRegistry 308 R600SchedRegistry("r600", "Run R600's custom scheduler", 309 createR600MachineScheduler); 310 311 static MachineSchedRegistry 312 SISchedRegistry("si", "Run SI's custom scheduler", 313 createSIMachineScheduler); 314 315 static MachineSchedRegistry 316 GCNMaxOccupancySchedRegistry("gcn-max-occupancy", 317 "Run GCN scheduler to maximize occupancy", 318 createGCNMaxOccupancyMachineScheduler); 319 320 static MachineSchedRegistry 321 IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental", 322 "Run GCN scheduler to maximize occupancy (experimental)", 323 createIterativeGCNMaxOccupancyMachineScheduler); 324 325 static MachineSchedRegistry 326 GCNMinRegSchedRegistry("gcn-minreg", 327 "Run GCN iterative scheduler for minimal register usage (experimental)", 328 createMinRegScheduler); 329 330 static MachineSchedRegistry 331 GCNILPSchedRegistry("gcn-ilp", 332 "Run GCN iterative scheduler for ILP scheduling (experimental)", 333 createIterativeILPMachineScheduler); 334 335 static StringRef computeDataLayout(const Triple &TT) { 336 if (TT.getArch() == Triple::r600) { 337 // 32-bit pointers. 338 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" 339 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"; 340 } 341 342 // 32-bit private, local, and region pointers. 64-bit global, constant and 343 // flat, non-integral buffer fat pointers. 344 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32" 345 "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" 346 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" 347 "-ni:7"; 348 } 349 350 LLVM_READNONE 351 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) { 352 if (!GPU.empty()) 353 return GPU; 354 355 // Need to default to a target with flat support for HSA. 356 if (TT.getArch() == Triple::amdgcn) 357 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic"; 358 359 return "r600"; 360 } 361 362 static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) { 363 // The AMDGPU toolchain only supports generating shared objects, so we 364 // must always use PIC. 365 return Reloc::PIC_; 366 } 367 368 AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT, 369 StringRef CPU, StringRef FS, 370 TargetOptions Options, 371 Optional<Reloc::Model> RM, 372 Optional<CodeModel::Model> CM, 373 CodeGenOpt::Level OptLevel) 374 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), 375 FS, Options, getEffectiveRelocModel(RM), 376 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel), 377 TLOF(createTLOF(getTargetTriple())) { 378 initAsmInfo(); 379 if (TT.getArch() == Triple::amdgcn) { 380 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64")) 381 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64)); 382 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32")) 383 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32)); 384 } 385 } 386 387 bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false; 388 bool AMDGPUTargetMachine::EnableFunctionCalls = false; 389 bool AMDGPUTargetMachine::EnableFixedFunctionABI = false; 390 391 AMDGPUTargetMachine::~AMDGPUTargetMachine() = default; 392 393 StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const { 394 Attribute GPUAttr = F.getFnAttribute("target-cpu"); 395 return GPUAttr.hasAttribute(Attribute::None) ? 396 getTargetCPU() : GPUAttr.getValueAsString(); 397 } 398 399 StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const { 400 Attribute FSAttr = F.getFnAttribute("target-features"); 401 402 return FSAttr.hasAttribute(Attribute::None) ? 403 getTargetFeatureString() : 404 FSAttr.getValueAsString(); 405 } 406 407 /// Predicate for Internalize pass. 408 static bool mustPreserveGV(const GlobalValue &GV) { 409 if (const Function *F = dyn_cast<Function>(&GV)) 410 return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv()); 411 412 return !GV.use_empty(); 413 } 414 415 void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) { 416 Builder.DivergentTarget = true; 417 418 bool EnableOpt = getOptLevel() > CodeGenOpt::None; 419 bool Internalize = InternalizeSymbols; 420 bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls; 421 bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt; 422 bool LibCallSimplify = EnableLibCallSimplify && EnableOpt; 423 424 if (EnableFunctionCalls) { 425 delete Builder.Inliner; 426 Builder.Inliner = createAMDGPUFunctionInliningPass(); 427 } 428 429 Builder.addExtension( 430 PassManagerBuilder::EP_ModuleOptimizerEarly, 431 [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &, 432 legacy::PassManagerBase &PM) { 433 if (AMDGPUAA) { 434 PM.add(createAMDGPUAAWrapperPass()); 435 PM.add(createAMDGPUExternalAAWrapperPass()); 436 } 437 PM.add(createAMDGPUUnifyMetadataPass()); 438 PM.add(createAMDGPUPrintfRuntimeBinding()); 439 PM.add(createAMDGPUPropagateAttributesLatePass(this)); 440 if (Internalize) { 441 PM.add(createInternalizePass(mustPreserveGV)); 442 PM.add(createGlobalDCEPass()); 443 } 444 if (EarlyInline) 445 PM.add(createAMDGPUAlwaysInlinePass(false)); 446 }); 447 448 const auto &Opt = Options; 449 Builder.addExtension( 450 PassManagerBuilder::EP_EarlyAsPossible, 451 [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &, 452 legacy::PassManagerBase &PM) { 453 if (AMDGPUAA) { 454 PM.add(createAMDGPUAAWrapperPass()); 455 PM.add(createAMDGPUExternalAAWrapperPass()); 456 } 457 PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this)); 458 PM.add(llvm::createAMDGPUUseNativeCallsPass()); 459 if (LibCallSimplify) 460 PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this)); 461 }); 462 463 Builder.addExtension( 464 PassManagerBuilder::EP_CGSCCOptimizerLate, 465 [](const PassManagerBuilder &, legacy::PassManagerBase &PM) { 466 // Add infer address spaces pass to the opt pipeline after inlining 467 // but before SROA to increase SROA opportunities. 468 PM.add(createInferAddressSpacesPass()); 469 470 // This should run after inlining to have any chance of doing anything, 471 // and before other cleanup optimizations. 472 PM.add(createAMDGPULowerKernelAttributesPass()); 473 }); 474 } 475 476 //===----------------------------------------------------------------------===// 477 // R600 Target Machine (R600 -> Cayman) 478 //===----------------------------------------------------------------------===// 479 480 R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT, 481 StringRef CPU, StringRef FS, 482 TargetOptions Options, 483 Optional<Reloc::Model> RM, 484 Optional<CodeModel::Model> CM, 485 CodeGenOpt::Level OL, bool JIT) 486 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) { 487 setRequiresStructuredCFG(true); 488 489 // Override the default since calls aren't supported for r600. 490 if (EnableFunctionCalls && 491 EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0) 492 EnableFunctionCalls = false; 493 } 494 495 const R600Subtarget *R600TargetMachine::getSubtargetImpl( 496 const Function &F) const { 497 StringRef GPU = getGPUName(F); 498 StringRef FS = getFeatureString(F); 499 500 SmallString<128> SubtargetKey(GPU); 501 SubtargetKey.append(FS); 502 503 auto &I = SubtargetMap[SubtargetKey]; 504 if (!I) { 505 // This needs to be done before we create a new subtarget since any 506 // creation will depend on the TM and the code generation flags on the 507 // function that reside in TargetOptions. 508 resetTargetOptions(F); 509 I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this); 510 } 511 512 return I.get(); 513 } 514 515 TargetTransformInfo 516 R600TargetMachine::getTargetTransformInfo(const Function &F) { 517 return TargetTransformInfo(R600TTIImpl(this, F)); 518 } 519 520 //===----------------------------------------------------------------------===// 521 // GCN Target Machine (SI+) 522 //===----------------------------------------------------------------------===// 523 524 GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT, 525 StringRef CPU, StringRef FS, 526 TargetOptions Options, 527 Optional<Reloc::Model> RM, 528 Optional<CodeModel::Model> CM, 529 CodeGenOpt::Level OL, bool JIT) 530 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {} 531 532 const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const { 533 StringRef GPU = getGPUName(F); 534 StringRef FS = getFeatureString(F); 535 536 SmallString<128> SubtargetKey(GPU); 537 SubtargetKey.append(FS); 538 539 auto &I = SubtargetMap[SubtargetKey]; 540 if (!I) { 541 // This needs to be done before we create a new subtarget since any 542 // creation will depend on the TM and the code generation flags on the 543 // function that reside in TargetOptions. 544 resetTargetOptions(F); 545 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this); 546 } 547 548 I->setScalarizeGlobalBehavior(ScalarizeGlobal); 549 550 return I.get(); 551 } 552 553 TargetTransformInfo 554 GCNTargetMachine::getTargetTransformInfo(const Function &F) { 555 return TargetTransformInfo(GCNTTIImpl(this, F)); 556 } 557 558 //===----------------------------------------------------------------------===// 559 // AMDGPU Pass Setup 560 //===----------------------------------------------------------------------===// 561 562 namespace { 563 564 class AMDGPUPassConfig : public TargetPassConfig { 565 public: 566 AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM) 567 : TargetPassConfig(TM, PM) { 568 // Exceptions and StackMaps are not supported, so these passes will never do 569 // anything. 570 disablePass(&StackMapLivenessID); 571 disablePass(&FuncletLayoutID); 572 } 573 574 AMDGPUTargetMachine &getAMDGPUTargetMachine() const { 575 return getTM<AMDGPUTargetMachine>(); 576 } 577 578 ScheduleDAGInstrs * 579 createMachineScheduler(MachineSchedContext *C) const override { 580 ScheduleDAGMILive *DAG = createGenericSchedLive(C); 581 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 582 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 583 return DAG; 584 } 585 586 void addEarlyCSEOrGVNPass(); 587 void addStraightLineScalarOptimizationPasses(); 588 void addIRPasses() override; 589 void addCodeGenPrepare() override; 590 bool addPreISel() override; 591 bool addInstSelector() override; 592 bool addGCPasses() override; 593 594 std::unique_ptr<CSEConfigBase> getCSEConfig() const override; 595 }; 596 597 std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const { 598 return getStandardCSEConfigForOpt(TM->getOptLevel()); 599 } 600 601 class R600PassConfig final : public AMDGPUPassConfig { 602 public: 603 R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM) 604 : AMDGPUPassConfig(TM, PM) {} 605 606 ScheduleDAGInstrs *createMachineScheduler( 607 MachineSchedContext *C) const override { 608 return createR600MachineScheduler(C); 609 } 610 611 bool addPreISel() override; 612 bool addInstSelector() override; 613 void addPreRegAlloc() override; 614 void addPreSched2() override; 615 void addPreEmitPass() override; 616 }; 617 618 class GCNPassConfig final : public AMDGPUPassConfig { 619 public: 620 GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM) 621 : AMDGPUPassConfig(TM, PM) { 622 // It is necessary to know the register usage of the entire call graph. We 623 // allow calls without EnableAMDGPUFunctionCalls if they are marked 624 // noinline, so this is always required. 625 setRequiresCodeGenSCCOrder(true); 626 } 627 628 GCNTargetMachine &getGCNTargetMachine() const { 629 return getTM<GCNTargetMachine>(); 630 } 631 632 ScheduleDAGInstrs * 633 createMachineScheduler(MachineSchedContext *C) const override; 634 635 bool addPreISel() override; 636 void addMachineSSAOptimization() override; 637 bool addILPOpts() override; 638 bool addInstSelector() override; 639 bool addIRTranslator() override; 640 void addPreLegalizeMachineIR() override; 641 bool addLegalizeMachineIR() override; 642 void addPreRegBankSelect() override; 643 bool addRegBankSelect() override; 644 bool addGlobalInstructionSelect() override; 645 void addFastRegAlloc() override; 646 void addOptimizedRegAlloc() override; 647 void addPreRegAlloc() override; 648 bool addPreRewrite() override; 649 void addPostRegAlloc() override; 650 void addPreSched2() override; 651 void addPreEmitPass() override; 652 }; 653 654 } // end anonymous namespace 655 656 void AMDGPUPassConfig::addEarlyCSEOrGVNPass() { 657 if (getOptLevel() == CodeGenOpt::Aggressive) 658 addPass(createGVNPass()); 659 else 660 addPass(createEarlyCSEPass()); 661 } 662 663 void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() { 664 addPass(createLICMPass()); 665 addPass(createSeparateConstOffsetFromGEPPass()); 666 addPass(createSpeculativeExecutionPass()); 667 // ReassociateGEPs exposes more opportunites for SLSR. See 668 // the example in reassociate-geps-and-slsr.ll. 669 addPass(createStraightLineStrengthReducePass()); 670 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or 671 // EarlyCSE can reuse. 672 addEarlyCSEOrGVNPass(); 673 // Run NaryReassociate after EarlyCSE/GVN to be more effective. 674 addPass(createNaryReassociatePass()); 675 // NaryReassociate on GEPs creates redundant common expressions, so run 676 // EarlyCSE after it. 677 addPass(createEarlyCSEPass()); 678 } 679 680 void AMDGPUPassConfig::addIRPasses() { 681 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine(); 682 683 // There is no reason to run these. 684 disablePass(&StackMapLivenessID); 685 disablePass(&FuncletLayoutID); 686 disablePass(&PatchableFunctionID); 687 688 addPass(createAMDGPUPrintfRuntimeBinding()); 689 690 // This must occur before inlining, as the inliner will not look through 691 // bitcast calls. 692 addPass(createAMDGPUFixFunctionBitcastsPass()); 693 694 // A call to propagate attributes pass in the backend in case opt was not run. 695 addPass(createAMDGPUPropagateAttributesEarlyPass(&TM)); 696 697 addPass(createAtomicExpandPass()); 698 699 700 addPass(createAMDGPULowerIntrinsicsPass()); 701 702 // Function calls are not supported, so make sure we inline everything. 703 addPass(createAMDGPUAlwaysInlinePass()); 704 addPass(createAlwaysInlinerLegacyPass()); 705 // We need to add the barrier noop pass, otherwise adding the function 706 // inlining pass will cause all of the PassConfigs passes to be run 707 // one function at a time, which means if we have a nodule with two 708 // functions, then we will generate code for the first function 709 // without ever running any passes on the second. 710 addPass(createBarrierNoopPass()); 711 712 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments. 713 if (TM.getTargetTriple().getArch() == Triple::r600) 714 addPass(createR600OpenCLImageTypeLoweringPass()); 715 716 // Replace OpenCL enqueued block function pointers with global variables. 717 addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass()); 718 719 if (TM.getOptLevel() > CodeGenOpt::None) { 720 addPass(createInferAddressSpacesPass()); 721 addPass(createAMDGPUPromoteAlloca()); 722 723 if (EnableSROA) 724 addPass(createSROAPass()); 725 726 if (EnableScalarIRPasses) 727 addStraightLineScalarOptimizationPasses(); 728 729 if (EnableAMDGPUAliasAnalysis) { 730 addPass(createAMDGPUAAWrapperPass()); 731 addPass(createExternalAAWrapperPass([](Pass &P, Function &, 732 AAResults &AAR) { 733 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>()) 734 AAR.addAAResult(WrapperPass->getResult()); 735 })); 736 } 737 } 738 739 if (TM.getTargetTriple().getArch() == Triple::amdgcn) { 740 // TODO: May want to move later or split into an early and late one. 741 addPass(createAMDGPUCodeGenPreparePass()); 742 } 743 744 TargetPassConfig::addIRPasses(); 745 746 // EarlyCSE is not always strong enough to clean up what LSR produces. For 747 // example, GVN can combine 748 // 749 // %0 = add %a, %b 750 // %1 = add %b, %a 751 // 752 // and 753 // 754 // %0 = shl nsw %a, 2 755 // %1 = shl %a, 2 756 // 757 // but EarlyCSE can do neither of them. 758 if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses) 759 addEarlyCSEOrGVNPass(); 760 } 761 762 void AMDGPUPassConfig::addCodeGenPrepare() { 763 if (TM->getTargetTriple().getArch() == Triple::amdgcn) 764 addPass(createAMDGPUAnnotateKernelFeaturesPass()); 765 766 if (TM->getTargetTriple().getArch() == Triple::amdgcn && 767 EnableLowerKernelArguments) 768 addPass(createAMDGPULowerKernelArgumentsPass()); 769 770 addPass(&AMDGPUPerfHintAnalysisID); 771 772 TargetPassConfig::addCodeGenPrepare(); 773 774 if (EnableLoadStoreVectorizer) 775 addPass(createLoadStoreVectorizerPass()); 776 } 777 778 bool AMDGPUPassConfig::addPreISel() { 779 addPass(createLowerSwitchPass()); 780 addPass(createFlattenCFGPass()); 781 return false; 782 } 783 784 bool AMDGPUPassConfig::addInstSelector() { 785 // Defer the verifier until FinalizeISel. 786 addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false); 787 return false; 788 } 789 790 bool AMDGPUPassConfig::addGCPasses() { 791 // Do nothing. GC is not supported. 792 return false; 793 } 794 795 //===----------------------------------------------------------------------===// 796 // R600 Pass Setup 797 //===----------------------------------------------------------------------===// 798 799 bool R600PassConfig::addPreISel() { 800 AMDGPUPassConfig::addPreISel(); 801 802 if (EnableR600StructurizeCFG) 803 addPass(createStructurizeCFGPass()); 804 return false; 805 } 806 807 bool R600PassConfig::addInstSelector() { 808 addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel())); 809 return false; 810 } 811 812 void R600PassConfig::addPreRegAlloc() { 813 addPass(createR600VectorRegMerger()); 814 } 815 816 void R600PassConfig::addPreSched2() { 817 addPass(createR600EmitClauseMarkers(), false); 818 if (EnableR600IfConvert) 819 addPass(&IfConverterID, false); 820 addPass(createR600ClauseMergePass(), false); 821 } 822 823 void R600PassConfig::addPreEmitPass() { 824 addPass(createAMDGPUCFGStructurizerPass(), false); 825 addPass(createR600ExpandSpecialInstrsPass(), false); 826 addPass(&FinalizeMachineBundlesID, false); 827 addPass(createR600Packetizer(), false); 828 addPass(createR600ControlFlowFinalizer(), false); 829 } 830 831 TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) { 832 return new R600PassConfig(*this, PM); 833 } 834 835 //===----------------------------------------------------------------------===// 836 // GCN Pass Setup 837 //===----------------------------------------------------------------------===// 838 839 ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler( 840 MachineSchedContext *C) const { 841 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>(); 842 if (ST.enableSIScheduler()) 843 return createSIMachineScheduler(C); 844 return createGCNMaxOccupancyMachineScheduler(C); 845 } 846 847 bool GCNPassConfig::addPreISel() { 848 AMDGPUPassConfig::addPreISel(); 849 850 if (EnableAtomicOptimizations) { 851 addPass(createAMDGPUAtomicOptimizerPass()); 852 } 853 854 // FIXME: We need to run a pass to propagate the attributes when calls are 855 // supported. 856 857 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit 858 // regions formed by them. 859 addPass(&AMDGPUUnifyDivergentExitNodesID); 860 if (!LateCFGStructurize) { 861 addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions 862 } 863 addPass(createSinkingPass()); 864 addPass(createAMDGPUAnnotateUniformValues()); 865 if (!LateCFGStructurize) { 866 addPass(createSIAnnotateControlFlowPass()); 867 } 868 addPass(createLCSSAPass()); 869 870 return false; 871 } 872 873 void GCNPassConfig::addMachineSSAOptimization() { 874 TargetPassConfig::addMachineSSAOptimization(); 875 876 // We want to fold operands after PeepholeOptimizer has run (or as part of 877 // it), because it will eliminate extra copies making it easier to fold the 878 // real source operand. We want to eliminate dead instructions after, so that 879 // we see fewer uses of the copies. We then need to clean up the dead 880 // instructions leftover after the operands are folded as well. 881 // 882 // XXX - Can we get away without running DeadMachineInstructionElim again? 883 addPass(&SIFoldOperandsID); 884 if (EnableDPPCombine) 885 addPass(&GCNDPPCombineID); 886 addPass(&DeadMachineInstructionElimID); 887 addPass(&SILoadStoreOptimizerID); 888 if (EnableSDWAPeephole) { 889 addPass(&SIPeepholeSDWAID); 890 addPass(&EarlyMachineLICMID); 891 addPass(&MachineCSEID); 892 addPass(&SIFoldOperandsID); 893 addPass(&DeadMachineInstructionElimID); 894 } 895 addPass(createSIShrinkInstructionsPass()); 896 } 897 898 bool GCNPassConfig::addILPOpts() { 899 if (EnableEarlyIfConversion) 900 addPass(&EarlyIfConverterID); 901 902 TargetPassConfig::addILPOpts(); 903 return false; 904 } 905 906 bool GCNPassConfig::addInstSelector() { 907 AMDGPUPassConfig::addInstSelector(); 908 addPass(&SIFixSGPRCopiesID); 909 addPass(createSILowerI1CopiesPass()); 910 addPass(createSIFixupVectorISelPass()); 911 addPass(createSIAddIMGInitPass()); 912 return false; 913 } 914 915 bool GCNPassConfig::addIRTranslator() { 916 addPass(new IRTranslator()); 917 return false; 918 } 919 920 void GCNPassConfig::addPreLegalizeMachineIR() { 921 bool IsOptNone = getOptLevel() == CodeGenOpt::None; 922 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone)); 923 addPass(new Localizer()); 924 } 925 926 bool GCNPassConfig::addLegalizeMachineIR() { 927 addPass(new Legalizer()); 928 return false; 929 } 930 931 void GCNPassConfig::addPreRegBankSelect() { 932 bool IsOptNone = getOptLevel() == CodeGenOpt::None; 933 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone)); 934 } 935 936 bool GCNPassConfig::addRegBankSelect() { 937 addPass(new RegBankSelect()); 938 return false; 939 } 940 941 bool GCNPassConfig::addGlobalInstructionSelect() { 942 addPass(new InstructionSelect()); 943 return false; 944 } 945 946 void GCNPassConfig::addPreRegAlloc() { 947 if (LateCFGStructurize) { 948 addPass(createAMDGPUMachineCFGStructurizerPass()); 949 } 950 addPass(createSIWholeQuadModePass()); 951 } 952 953 void GCNPassConfig::addFastRegAlloc() { 954 // FIXME: We have to disable the verifier here because of PHIElimination + 955 // TwoAddressInstructions disabling it. 956 957 // This must be run immediately after phi elimination and before 958 // TwoAddressInstructions, otherwise the processing of the tied operand of 959 // SI_ELSE will introduce a copy of the tied operand source after the else. 960 insertPass(&PHIEliminationID, &SILowerControlFlowID, false); 961 962 // This must be run just after RegisterCoalescing. 963 insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false); 964 965 TargetPassConfig::addFastRegAlloc(); 966 } 967 968 void GCNPassConfig::addOptimizedRegAlloc() { 969 if (OptExecMaskPreRA) { 970 insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID); 971 insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID); 972 } else { 973 insertPass(&MachineSchedulerID, &SIFormMemoryClausesID); 974 } 975 976 // This must be run immediately after phi elimination and before 977 // TwoAddressInstructions, otherwise the processing of the tied operand of 978 // SI_ELSE will introduce a copy of the tied operand source after the else. 979 insertPass(&PHIEliminationID, &SILowerControlFlowID, false); 980 981 // This must be run just after RegisterCoalescing. 982 insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false); 983 984 if (EnableDCEInRA) 985 insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID); 986 987 TargetPassConfig::addOptimizedRegAlloc(); 988 } 989 990 bool GCNPassConfig::addPreRewrite() { 991 if (EnableRegReassign) { 992 addPass(&GCNNSAReassignID); 993 addPass(&GCNRegBankReassignID); 994 } 995 return true; 996 } 997 998 void GCNPassConfig::addPostRegAlloc() { 999 addPass(&SIFixVGPRCopiesID); 1000 if (getOptLevel() > CodeGenOpt::None) 1001 addPass(&SIOptimizeExecMaskingID); 1002 TargetPassConfig::addPostRegAlloc(); 1003 1004 // Equivalent of PEI for SGPRs. 1005 addPass(&SILowerSGPRSpillsID); 1006 } 1007 1008 void GCNPassConfig::addPreSched2() { 1009 addPass(&SIPostRABundlerID); 1010 } 1011 1012 void GCNPassConfig::addPreEmitPass() { 1013 addPass(createSIMemoryLegalizerPass()); 1014 addPass(createSIInsertWaitcntsPass()); 1015 addPass(createSIShrinkInstructionsPass()); 1016 addPass(createSIModeRegisterPass()); 1017 1018 // The hazard recognizer that runs as part of the post-ra scheduler does not 1019 // guarantee to be able handle all hazards correctly. This is because if there 1020 // are multiple scheduling regions in a basic block, the regions are scheduled 1021 // bottom up, so when we begin to schedule a region we don't know what 1022 // instructions were emitted directly before it. 1023 // 1024 // Here we add a stand-alone hazard recognizer pass which can handle all 1025 // cases. 1026 // 1027 // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would 1028 // be better for it to emit S_NOP <N> when possible. 1029 addPass(&PostRAHazardRecognizerID); 1030 1031 addPass(&SIRemoveShortExecBranchesID); 1032 addPass(&SIInsertSkipsPassID); 1033 addPass(&BranchRelaxationPassID); 1034 } 1035 1036 TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) { 1037 return new GCNPassConfig(*this, PM); 1038 } 1039 1040 yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const { 1041 return new yaml::SIMachineFunctionInfo(); 1042 } 1043 1044 yaml::MachineFunctionInfo * 1045 GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const { 1046 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1047 return new yaml::SIMachineFunctionInfo(*MFI, 1048 *MF.getSubtarget().getRegisterInfo()); 1049 } 1050 1051 bool GCNTargetMachine::parseMachineFunctionInfo( 1052 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS, 1053 SMDiagnostic &Error, SMRange &SourceRange) const { 1054 const yaml::SIMachineFunctionInfo &YamlMFI = 1055 reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_); 1056 MachineFunction &MF = PFS.MF; 1057 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1058 1059 MFI->initializeBaseYamlFields(YamlMFI); 1060 1061 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) { 1062 // FIXME: Update parseNamedRegsiterReference to take a Register. 1063 unsigned TempReg; 1064 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) { 1065 SourceRange = RegName.SourceRange; 1066 return true; 1067 } 1068 RegVal = TempReg; 1069 1070 return false; 1071 }; 1072 1073 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) { 1074 // Create a diagnostic for a the register string literal. 1075 const MemoryBuffer &Buffer = 1076 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID()); 1077 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1078 RegName.Value.size(), SourceMgr::DK_Error, 1079 "incorrect register class for field", RegName.Value, 1080 None, None); 1081 SourceRange = RegName.SourceRange; 1082 return true; 1083 }; 1084 1085 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) || 1086 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) || 1087 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg)) 1088 return true; 1089 1090 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG && 1091 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) { 1092 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg); 1093 } 1094 1095 if (MFI->FrameOffsetReg != AMDGPU::FP_REG && 1096 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) { 1097 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg); 1098 } 1099 1100 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG && 1101 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) { 1102 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg); 1103 } 1104 1105 auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A, 1106 const TargetRegisterClass &RC, 1107 ArgDescriptor &Arg, unsigned UserSGPRs, 1108 unsigned SystemSGPRs) { 1109 // Skip parsing if it's not present. 1110 if (!A) 1111 return false; 1112 1113 if (A->IsRegister) { 1114 unsigned Reg; 1115 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) { 1116 SourceRange = A->RegisterName.SourceRange; 1117 return true; 1118 } 1119 if (!RC.contains(Reg)) 1120 return diagnoseRegisterClass(A->RegisterName); 1121 Arg = ArgDescriptor::createRegister(Reg); 1122 } else 1123 Arg = ArgDescriptor::createStack(A->StackOffset); 1124 // Check and apply the optional mask. 1125 if (A->Mask) 1126 Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue()); 1127 1128 MFI->NumUserSGPRs += UserSGPRs; 1129 MFI->NumSystemSGPRs += SystemSGPRs; 1130 return false; 1131 }; 1132 1133 if (YamlMFI.ArgInfo && 1134 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer, 1135 AMDGPU::SGPR_128RegClass, 1136 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) || 1137 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr, 1138 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr, 1139 2, 0) || 1140 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass, 1141 MFI->ArgInfo.QueuePtr, 2, 0) || 1142 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr, 1143 AMDGPU::SReg_64RegClass, 1144 MFI->ArgInfo.KernargSegmentPtr, 2, 0) || 1145 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID, 1146 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID, 1147 2, 0) || 1148 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit, 1149 AMDGPU::SReg_64RegClass, 1150 MFI->ArgInfo.FlatScratchInit, 2, 0) || 1151 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize, 1152 AMDGPU::SGPR_32RegClass, 1153 MFI->ArgInfo.PrivateSegmentSize, 0, 0) || 1154 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX, 1155 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX, 1156 0, 1) || 1157 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY, 1158 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY, 1159 0, 1) || 1160 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ, 1161 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ, 1162 0, 1) || 1163 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo, 1164 AMDGPU::SGPR_32RegClass, 1165 MFI->ArgInfo.WorkGroupInfo, 0, 1) || 1166 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset, 1167 AMDGPU::SGPR_32RegClass, 1168 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) || 1169 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr, 1170 AMDGPU::SReg_64RegClass, 1171 MFI->ArgInfo.ImplicitArgPtr, 0, 0) || 1172 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr, 1173 AMDGPU::SReg_64RegClass, 1174 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) || 1175 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX, 1176 AMDGPU::VGPR_32RegClass, 1177 MFI->ArgInfo.WorkItemIDX, 0, 0) || 1178 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY, 1179 AMDGPU::VGPR_32RegClass, 1180 MFI->ArgInfo.WorkItemIDY, 0, 0) || 1181 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ, 1182 AMDGPU::VGPR_32RegClass, 1183 MFI->ArgInfo.WorkItemIDZ, 0, 0))) 1184 return true; 1185 1186 MFI->Mode.IEEE = YamlMFI.Mode.IEEE; 1187 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp; 1188 MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals; 1189 MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals; 1190 MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals; 1191 MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals; 1192 1193 return false; 1194 } 1195