//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_ADDC_U32_e64:
      case AMDGPU::V_SUBB_U32_e64:
        if (TII->getNamedOperand(MI, AMDGPU::OpName::src1)->isImm())
          return false;
        // Additional verification is needed for sdst/src2.
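        // The e32 forms of these read their carry-in from and write their
        // carry-out to an implicit VCC; the main loop below verifies that both
        // sdst and src2 are (or are hinted to become) VCC before shrinking.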
        return true;

      case AMDGPU::V_MAC_F32_e64:
      case AMDGPU::V_MAC_F16_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {

  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (TII->isLiteralConstant(MI, Src0Idx))
    return;

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                             isUInt<32>(MovSrc.getImm()))) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
    foldImmediates(MI, TII, MRI, false);
}

// Copy MachineOperand with all flags except setting it as implicit.
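// Here that means transferring the undef and kill flags from the original
// explicit VCC operand onto the implicit VCC use of the shrunk instruction.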
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {

  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      //  =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
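        // Hint the destination and the register source toward each other so
        // that, after regalloc, the src0 == dest case below can switch to the
        // tied s_addk/s_mulk forms.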
        if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
            Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
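      // In the e32 form the carry-in is an implicit read of VCC, so src2 must
      // also end up in VCC (or be hinted there) for the shrink to be legal.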
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        if (SDst->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
          continue;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);

          continue;
        }
      }

      // We can shrink this instruction
      DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
      // For VOPC instructions, this is replaced by an implicit def of vcc.
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.add(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.add(*Src1);

      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.add(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
          // during the initial BuildMI, so find it to preserve the flags.
          copyFlagsToImplicitVCC(*Inst32, *Src2);
        }
      }

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}