//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their address on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "r600cf"

namespace {

struct CFStack {

  enum StackItem {
    ENTRY = 0,
    SUB_ENTRY = 1,
    FIRST_NON_WQM_PUSH = 2,
    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
  };

  const AMDGPUSubtarget *ST;
  std::vector<StackItem> BranchStack;
  std::vector<StackItem> LoopStack;
  unsigned MaxStackSize;
  unsigned CurrentEntries;
  unsigned CurrentSubEntries;
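
  // Full entries and sub-entries are tracked separately; updateMaxStackSize()
  // packs four sub-entries into one hardware stack entry when computing the
  // stack size that is finally reported as STACK_SIZE.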

  CFStack(const AMDGPUSubtarget *st, CallingConv::ID cc) : ST(st),
      // We need to reserve a stack entry for CALL_FS in vertex shaders.
      MaxStackSize(cc == CallingConv::AMDGPU_VS ? 1 : 0),
      CurrentEntries(0), CurrentSubEntries(0) { }

  unsigned getLoopDepth();
  bool branchStackContains(CFStack::StackItem);
  bool requiresWorkAroundForInst(unsigned Opcode);
  unsigned getSubEntrySize(CFStack::StackItem Item);
  void updateMaxStackSize();
  void pushBranch(unsigned Opcode, bool isWQM = false);
  void pushLoop();
  void popBranch();
  void popLoop();
};

unsigned CFStack::getLoopDepth() {
  return LoopStack.size();
}

bool CFStack::branchStackContains(CFStack::StackItem Item) {
  for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
       E = BranchStack.end(); I != E; ++I) {
    if (*I == Item)
      return true;
  }
  return false;
}

bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&
      getLoopDepth() > 1)
    return true;

  if (!ST->hasCFAluBug())
    return false;

  switch (Opcode) {
  default: return false;
  case AMDGPU::CF_ALU_PUSH_BEFORE:
  case AMDGPU::CF_ALU_ELSE_AFTER:
  case AMDGPU::CF_ALU_BREAK:
  case AMDGPU::CF_ALU_CONTINUE:
    if (CurrentSubEntries == 0)
      return false;
    if (ST->getWavefrontSize() == 64) {
      // We are being conservative here.  We only require this work-around if
      // CurrentSubEntries > 3 &&
      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
      //
      // We have to be conservative, because we don't know for certain that
      // our stack allocation algorithm for Evergreen/NI is correct.  Applying
      // this work-around when CurrentSubEntries > 3 allows us to over-allocate
      // stack resources without any problems.
      return CurrentSubEntries > 3;
    } else {
      assert(ST->getWavefrontSize() == 32);
      // We are being conservative here.  We only require the work-around if
      // CurrentSubEntries > 7 &&
      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
      // See the comment on the wavefront size == 64 case for why we are
      // being conservative.
      return CurrentSubEntries > 7;
    }
  }
}

unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
  switch (Item) {
  default:
    return 0;
  case CFStack::FIRST_NON_WQM_PUSH:
    assert(!ST->hasCaymanISA());
    if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
      // +1 For the push operation.
      // +2 Extra space required.
      return 3;
    } else {
      // Some documentation says that this is not necessary on Evergreen,
      // but experimentation has shown that we need to allocate 1 extra
      // sub-entry for the first non-WQM push.
      // +1 For the push operation.
      // +1 Extra space required.
      return 2;
    }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
    assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    // +1 For the push operation.
    // +1 Extra space required.
    return 2;
  case CFStack::SUB_ENTRY:
    return 1;
  }
}

void CFStack::updateMaxStackSize() {
  unsigned CurrentStackSize =
      CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}

void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
  CFStack::StackItem Item = CFStack::ENTRY;
  switch (Opcode) {
  case AMDGPU::CF_PUSH_EG:
  case AMDGPU::CF_ALU_PUSH_BEFORE:
    if (!isWQM) {
      if (!ST->hasCaymanISA() &&
          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
        Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on
                                             // Evergreen/NI; see comment in
                                             // CFStack::getSubEntrySize().
      else if (CurrentEntries > 0 &&
               ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
               !ST->hasCaymanISA() &&
               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
      else
        Item = CFStack::SUB_ENTRY;
    } else
      Item = CFStack::ENTRY;
    break;
  }
  BranchStack.push_back(Item);
  if (Item == CFStack::ENTRY)
    CurrentEntries++;
  else
    CurrentSubEntries += getSubEntrySize(Item);
  updateMaxStackSize();
}

void CFStack::pushLoop() {
  LoopStack.push_back(CFStack::ENTRY);
  CurrentEntries++;
  updateMaxStackSize();
}

void CFStack::popBranch() {
  CFStack::StackItem Top = BranchStack.back();
  if (Top == CFStack::ENTRY)
    CurrentEntries--;
  else
    CurrentSubEntries -= getSubEntrySize(Top);
  BranchStack.pop_back();
}

void CFStack::popLoop() {
  CurrentEntries--;
  LoopStack.pop_back();
}

class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  static char ID;
  const R600InstrInfo *TII;
  const R600RegisterInfo *TRI;
  unsigned MaxFetchInst;
  const AMDGPUSubtarget *ST;

  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }

  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
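    // Control flow opcodes come in per-generation variants: Evergreen and
    // later use the *_EG encodings, R600/R700 use the *_R600 ones, and Cayman
    // additionally has its own CF_END (handled in the CF_END case below).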
    bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
      break;
    case CF_END:
      if (ST->hasCaymanISA()) {
        Opcode = AMDGPU::CF_END_CM;
        break;
      }
      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }

  bool isCompatibleWithClause(const MachineInstr *MI,
                              std::set<unsigned> &DstRegs) const {
    unsigned DstMI = 0, SrcMI = 0;
    for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
         E = MI->operands_end(); I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
    }
    if (DstRegs.find(SrcMI) == DstRegs.end()) {
      DstRegs.insert(DstMI);
      return true;
    } else
      return false;
  }

  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(I)) ||
          (!IsTex && !TII->usesVertexCache(I)))
        break;
      if (!isCompatibleWithClause(I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        getHWInstrDesc(IsTex ? CF_TC : CF_VC))
        .addImm(0) // ADDR
        .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, std::move(ClauseContent));
  }

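  // Rewrite every ALU_LITERAL_X source of MI to one of the four literal
  // channel registers, reusing a channel when the same immediate is already
  // listed in Lits and recording the new literal operand otherwise.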
  void getLiteral(MachineInstr *MI, std::vector<MachineOperand *> &Lits) const {
    static const unsigned LiteralRegs[] = {
      AMDGPU::ALU_LITERAL_X,
      AMDGPU::ALU_LITERAL_Y,
      AMDGPU::ALU_LITERAL_Z,
      AMDGPU::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (const auto &Src : Srcs) {
      if (Src.first->getReg() != AMDGPU::ALU_LITERAL_X)
        continue;
      int64_t Imm = Src.second;
      std::vector<MachineOperand *>::iterator It =
          std::find_if(Lits.begin(), Lits.end(),
                       [&](MachineOperand *val) {
                         return val->isImm() && (val->getImm() == Imm);
                       });

      // Get corresponding Operand
      MachineOperand &Operand = MI->getOperand(
          TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal));

      if (It != Lits.end()) {
        // Reuse existing literal reg
        unsigned Index = It - Lits.begin();
        Src.first->setReg(LiteralRegs[Index]);
      } else {
        // Allocate new literal reg
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Src.first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(&Operand);
      }
    }
  }

  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
          TII->get(AMDGPU::LITERALS))
          .addImm(LiteralPair0)
          .addImm(LiteralPair1);
    }
    return InsertPos;
  }

  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<MachineOperand *> Literals;
      if (I->isBundle()) {
        MachineInstr *DeleteMI = I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (MachineOperand &MO : BI->operands()) {
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(&*BI, Literals);
          ClauseContent.push_back(&*BI);
        }
        I = BI;
        DeleteMI->eraseFromParent();
      } else {
        getLiteral(I, Literals);
        ClauseContent.push_back(I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        MachineInstrBuilder MILit = BuildMI(MBB, I, I->getDebugLoc(),
            TII->get(AMDGPU::LITERALS));
        if (Literals[i]->isImm()) {
          MILit.addImm(Literals[i]->getImm());
        } else {
          MILit.addGlobalAddress(Literals[i]->getGlobal(),
                                 Literals[i]->getOffset());
        }
        if (i + 1 < e) {
          if (Literals[i + 1]->isImm()) {
            MILit.addImm(Literals[i + 1]->getImm());
          } else {
            MILit.addGlobalAddress(Literals[i + 1]->getGlobal(),
                                   Literals[i + 1]->getOffset());
          }
        } else
          MILit.addImm(0);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(ClauseHead, std::move(ClauseContent));
  }

  void
  EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
                  unsigned &CfCount) {
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
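      // Move each instruction of the clause body in front of InsertPos,
      // preserving their relative order.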
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += 2 * Clause.second.size();
  }

  void
  EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
                unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += Clause.second.size();
  }

  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
  }

  void CounterPropagateAddr(const std::set<MachineInstr *> &MIs,
                            unsigned Addr) const {
    for (MachineInstr *MI : MIs) {
      CounterPropagateAddr(MI, Addr);
    }
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm)
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    ST = &MF.getSubtarget<AMDGPUSubtarget>();
    MaxFetchInst = ST->getTexVTXClauseSize();
    TII = static_cast<const R600InstrInfo *>(ST->getInstrInfo());
    TRI = static_cast<const R600RegisterInfo *>(ST->getRegisterInfo());
    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

    CFStack CFStack(ST, MF.getFunction()->getCallingConv());
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
         ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            getHWInstrDesc(CF_CALL_FS));
        CfCount++;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;

      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
           I != E;) {
        if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
          DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          LastAlu.back() = nullptr;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != AMDGPU::ENDIF)
          LastAlu.back() = nullptr;
        if (MI->getOpcode() == AMDGPU::CF_ALU)
          LastAlu.back() = MI;
        I++;
        bool RequiresWorkAround =
            CFStack.requiresWorkAroundForInst(MI->getOpcode());
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          if (RequiresWorkAround) {
            DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
            BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(AMDGPU::CF_ALU));
            CfCount++;
            CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
          } else
            CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
          // FALLTHROUGH: the instruction still heads an ALU clause and is
          // handled like a plain CF_ALU below.

        case AMDGPU::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CFStack.pushLoop();
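          // Emit the native WHILE_LOOP now; its branch target is only known
          // when the matching ENDLOOP is reached, so record it in the loop's
          // set of instructions whose address operand still needs patching.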
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_WHILE_LOOP))
              .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *> >
              Pair(CfCount, std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(std::move(Pair));
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CFStack.popLoop();
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              std::move(LoopStack.back());
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          LastAlu.push_back(nullptr);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_JUMP))
              .addImm(0)
              .addImm(0);
          IfThenElseStack.push_back(MIb);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_ELSE))
              .addImm(0)
              .addImm(0);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CFStack.popBranch();
          if (LastAlu.back()) {
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                getHWInstrDesc(CF_POP))
                .addImm(CfCount + 1)
                .addImm(1);
            (void)MIb;
            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_BREAK))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_CONTINUE))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::RETURN: {
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
          CfCount++;
          if (CfCount % 2) {
            BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
            CfCount++;
          }
          MI->eraseFromParent();
          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
            EmitFetchClause(I, FetchClauses[i], CfCount);
          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
            EmitALUClause(I, AluClauses[i], CfCount);
          break;
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }
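      // CF_ALU clauses that directly precede an ENDIF are rewritten as
      // CF_ALU_POP_AFTER, folding the stack pop into the clause instead of
      // emitting a separate POP instruction.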
      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
        MachineInstr *Alu = ToPopAfter[i];
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
            TII->get(AMDGPU::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->StackSize = CFStack.MaxStackSize;
    }

    return false;
  }

  const char *getPassName() const override {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

} // end anonymous namespace

llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}