//===-- SIInsertSkips.cpp - Use predicates for control flow --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass inserts branches on the 0 exec mask over divergent
/// branches when it's expected that jumping over the untaken control flow
/// will be cheaper than having every workitem no-op through it.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

static cl::opt<unsigned> SkipThresholdFlag(
    "amdgpu-skip-threshold",
    cl::desc("Number of instructions before jumping over divergent control flow"),
    cl::init(12), cl::Hidden);

namespace {

class SIInsertSkips : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  unsigned SkipThreshold = 0;

  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void kill(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();
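
  // Walk the blocks from From toward To in layout order, counting the
  // instructions a skip branch would jump over; the skip pays off once the
  // count reaches SkipThreshold.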
  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it become infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      // A V_READFIRSTLANE/V_READLANE destination register may be used as an
      // operand by some SALU instruction. If the exec mask is zero, the vector
      // instruction defining the register used by the scalar one is not
      // executed, and the scalar instruction would operate on undefined data.
      // For V_READFIRSTLANE/V_READLANE we must therefore skip rather than rely
      // on predicated execution.
      if ((I->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) ||
          (I->getOpcode() == AMDGPU::V_READLANE_B32)) {
        return true;
      }

      if (I->isInlineAsm()) {
        const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
        const char *AsmStr = I->getOperand(0).getSymbolName();

        // The inline asm length estimate is a byte count assuming the longest
        // possible instruction encoding.
        uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
        NumInstr += MaxAsmSize / MAI->getMaxInstLength();
      } else {
        ++NumInstr;
      }

      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions.
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate the wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}
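
// Lower a SI_KILL_*_TERMINATOR pseudo into the exec-mask update that disables
// the killed lanes: a V_CMPX comparison (which writes EXEC) for the F32 form,
// or an S_AND_B64/S_ANDN2_B64 on EXEC (S_MOV_B64 EXEC, 0 for an immediate
// operand) for the i1 form.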
void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x".
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}

MachineBasicBlock *SIInsertSkips::insertSkipBlock(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}
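
// If the code between a SI_MASK_BRANCH and its destination is long enough to
// be worth skipping, insert an S_CBRANCH_EXECZ to the destination so a wave
// whose exec mask is zero branches over it instead of no-op'ing through it.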
// Returns true if a branch over the block was inserted.
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
                                   MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();

  if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());

  BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addMBB(DestBB);

  return true;
}

bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  MachineFunction::iterator NextBB;

  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached the convergence point for the last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // Remove the given unconditional branch when a skip block has been
          // inserted after the current one, so that the S_CBRANCH_EXECNZ can
          // skip the two instructions performing the kill when the exec mask
          // is non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          HaveKill = true;
        }

        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else.
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty
          // block at the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;

      default:
        break;
      }
    }
  }

  return MadeChange;
}