//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC   // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0             // This instruction is an optional
///                                    // optimization which allows us to
///                                    // branch if all the bits of
///                                    // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0  // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1             // Use our branch optimization
///                                    // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {
  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
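      // Conservatively require a skip branch around any block that contains
      // such a VCC branch.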
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (++NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

void SILowerControlFlow::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {
    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);
  }
  MI.eraseFromParent();
}

/// \param @VecReg The register which holds element zero of the vector
///                being addressed into.
/// \param[out] @Reg The base register to use in the indirect addressing
///                  instruction.
/// \param[in,out] @Offset As an input, this is the constant offset part of the
///                        indirect Index. e.g. v0 = v[VecReg + Offset]
///                        As an output, this is a constant value that needs
///                        to be added to the value stored in M0.
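///
/// For example (assuming the vector's first 32-bit subregister is VGPR4):
/// with Offset = 2 on input, Reg becomes VGPR6 and Offset becomes 0; with
/// Offset = -6, Reg becomes VGPR0 and Offset becomes -2, to be added to M0
/// at run time.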
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

void SILowerControlFlow::IndirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

void SILowerControlFlow::IndirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
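      // Remember that this function contains flat instructions so that, once
      // the walk is done, the machine function info can be flagged for flat
      // scratch initialization in kernels.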
      if (TII->isFLAT(MI))
        NeedFlat = true;

      for (const auto &Def : I->defs()) {
        if (Def.isReg() && Def.isDef() && Def.getReg() == AMDGPU::EXEC) {
          ExecModified = true;
          break;
        }
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI, ExecModified);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::S_ENDPGM: {
          if (MF.getInfo<SIMachineFunctionInfo>()->returnsVoid())
            break;

          // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
          // because external bytecode will be appended at the end.
          if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
            // S_ENDPGM is not the last instruction. Add an empty block at
            // the end and jump there.
            if (!EmptyMBBAtEnd) {
              EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
              MF.insert(MF.end(), EmptyMBBAtEnd);
            }

            MBB.addSuccessor(EmptyMBBAtEnd);
            BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
              .addMBB(EmptyMBBAtEnd);
          }

          I->eraseFromParent();
          break;
        }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}