//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFile.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);
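// Note: the cl::opt flags in this file are developer toggles; each can be
// flipped on the llc/opt command line (e.g. -amdgpu-sroa=0) and otherwise
// keeps the default given to cl::init().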
// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to enable new waitcnt insertion pass.
static cl::opt<bool> EnableSIInsertWaitcntsPass(
  "enable-si-insert-waitcnts",
  cl::desc("Use new waitcnt insertion pass"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::Hidden,
  cl::desc("Enable AMDGPU function call support"),
  cl::init(false));

// Enable libcall simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}
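// Each factory below builds the ScheduleDAGInstrs for one of the machine
// schedulers registered further down; the MachineSchedRegistry entries make
// them selectable by name with -misched=<name>.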
static
ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    if (TT.getEnvironmentName() == "amdgiz" ||
        TT.getEnvironmentName() == "amdgizcl")
      return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
             "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
  }
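  // The data layout strings here follow the LLVM DataLayout grammar: "e"
  // selects little-endian, "pN:S:A" gives the size and ABI alignment of
  // pointers in address space N, and "A5" marks address space 5 as the
  // alloca address space.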
  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  if (TT.getEnvironmentName() == "amdgiz" ||
      TT.getEnvironmentName() == "amdgizcl")
    return "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32"
           "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
  return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
  if (CM)
    return *CM;
  return CodeModel::Small;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  AS = AMDGPU::getAMDGPUAS(TT);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

static ImmutablePass *createAMDGPUExternalAAWrapperPass() {
  return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
      if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
        AAR.addAAResult(WrapperPass->getResult());
      });
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }
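  // The addExtension() calls below hook into fixed PassManagerBuilder
  // extension points (EP_EnabledOnOptLevel0, EP_ModuleOptimizerEarly,
  // EP_EarlyAsPossible, EP_CGSCCOptimizerLate); this is how the target
  // injects its IR passes into the standard -O pipelines.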
  if (Internalize) {
    // If we're generating code, we always have the whole program available.
    // The relocations expected for externally visible functions aren't
    // supported, so make sure every non-entry function is hidden.
    Builder.addExtension(
      PassManagerBuilder::EP_EnabledOnOptLevel0,
      [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        PM.add(createInternalizePass(mustPreserveGV));
      });
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}
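// getSubtargetImpl() below caches one subtarget per (gpu, feature string)
// pair, so functions compiled with identical target attributes share a
// single R600Subtarget instance.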
const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};
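// R600PassConfig and GCNPassConfig below specialize the shared
// AMDGPUPassConfig for the two hardware generations; each overrides the
// hooks that TargetPassConfig calls while assembling the codegen pipeline.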
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AMDGPUTTIImpl(this, F));
  });
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
  // or EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPULowerIntrinsicsPass());

  if (TM.getTargetTriple().getArch() == Triple::r600 ||
      !EnableAMDGPUFunctionCalls) {
    // Function calls are not supported, so make sure we inline everything.
    addPass(createAMDGPUAlwaysInlinePass());
    addPass(createAlwaysInlinerLegacyPass());
    // We need to add the barrier noop pass, otherwise adding the function
    // inlining pass will cause all of the PassConfigs passes to be run
    // one function at a time, which means if we have a module with two
    // functions, then we will generate code for the first function
    // without ever running any passes on the second.
    addPass(createBarrierNoopPass());
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());
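  // The optimizations below only run at -O1 and above. Note the ordering:
  // InferAddressSpaces and PromoteAlloca run first, with SROA after the
  // promote-alloca pass (see the amdgpu-sroa flag above).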
  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}
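// Note: the scheduler chosen by createMachineScheduler() above can also be
// forced from the command line via -misched=<name>, using the registry names
// declared earlier ("si", "gcn-max-occupancy", ...).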
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&MachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
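// addOptimizedRegAlloc mirrors the insertions above and additionally runs
// SIOptimizeExecMaskingPreRA between the machine scheduler and the register
// allocator.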
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (EnableSIInsertWaitcntsPass)
    addPass(createSIInsertWaitcntsPass());
  else
    addPass(createSIInsertWaitsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(&SIInsertSkipsPassID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIDebuggerInsertNopsPass());
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}