//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one bit per
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

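// Decide whether a skip branch should be emitted over the blocks between
// \p From and \p To. Returns true once at least SkipThreshold instructions
// have been counted, or as soon as an S_CBRANCH_VCCNZ is found, since that
// branch must not be executed while EXEC = 0 (see the comment in the loop
// below).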
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineFunction::iterator MBBI = MachineFunction::iterator(From),
       ToI = MachineFunction::iterator(To); MBBI != ToI; ++MBBI) {

    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled()) {
        // When a uniform loop is inside non-uniform control flow, the branch
        // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
        // when EXEC = 0. We should skip the loop lest it becomes infinite.
        if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ)
          return true;

        if (++NumInstr >= SkipThreshold)
          return true;
      }
    }
  }

  return false;
}

void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

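// Lower SI_IF: save the incoming EXEC mask into the destination SGPR pair,
// AND EXEC with the branch condition, then XOR the saved mask with the new
// EXEC so that it holds exactly the lanes that will run the ELSE side. A
// skip branch over the THEN block is emitted when shouldSkip() finds it
// worthwhile.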
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

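// Lower SI_LOOP: remove the lanes that have hit a break (accumulated in the
// source mask) from EXEC, then branch back to the loop header for as long as
// any lane is still active.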
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

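// Write the index operand of \p MI (plus \p Offset) into M0 and insert
// \p MovRel at this point. A uniform SGPR index is moved directly; a VGPR
// index requires a waterfall loop that picks one lane's index per iteration
// with V_READFIRSTLANE_B32 and repeats until every active lane has been
// handled.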
void SILowerControlFlow::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }
    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);

  }
  MI.eraseFromParent();
}

/// \param VecReg The register which holds element zero of the vector being
///               addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                       indirect Index, e.g. v0 = v[VecReg + Offset]. As an
///                       output, this is a constant value that needs to be
///                       added to the value stored in M0.
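///
/// For example, assuming \p VecReg starts at hardware register v4: an input
/// \p Offset of 2 yields \p Reg = v6 and \p Offset = 0, while a sum that
/// would go negative instead yields the first register of the class and
/// leaves the (negative) remainder in \p Offset to be added to M0 at
/// runtime.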
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

void SILowerControlFlow::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

void SILowerControlFlow::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

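// Walk every block, tracking the control-flow nesting depth and whether EXEC
// was written in the current block, and replace each control-flow pseudo
// with the real EXEC-mask manipulation it stands for.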
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      for (const auto &Def : I->defs()) {
        if (Def.isReg() && Def.isDef() && Def.getReg() == AMDGPU::EXEC) {
          ExecModified = true;
          break;
        }
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI, ExecModified);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::S_ENDPGM: {
          if (MF.getInfo<SIMachineFunctionInfo>()->returnsVoid())
            break;

          // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
          // because external bytecode will be appended at the end.
          if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
            // S_ENDPGM is not the last instruction. Add an empty block at
            // the end and jump there.
            if (!EmptyMBBAtEnd) {
              EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
              MF.insert(MF.end(), EmptyMBBAtEnd);
            }

            MBB.addSuccessor(EmptyMBBAtEnd);
            BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
              .addMBB(EmptyMBBAtEnd);
          }

          I->eraseFromParent();
          break;
        }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    if (NeedFlat)
      MFI->setHasFlatInstructions(true);
  }

  return true;
}