//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

namespace llvm {
void initializeSIShrinkInstructionsPass(PassRegistry&);
}

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Shrink Instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
                      "SI Shrink Instructions", false, false)
INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
                    "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
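  //
  // The switch below handles the two exceptions: V_MAC_F32_e64, whose src2
  // is tied to the destination and so is shrinkable when src2 is a plain
  // VGPR without modifiers, and V_CNDMASK_B32_e64, which is let through
  // here and constrained to a vcc src2 later in runOnMachineFunction.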
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_MAC_F32_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0, since all input types are legal, so just
  // make sure src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {

  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (Src0.isImm() &&
      TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
    return;

  // Literal constants and SGPRs can only be used in src0, so if src0 is an
  // SGPR, we cannot commute the instruction, so we can't fold any literal
  // constants.
  if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
    return;

  // Try to fold src0.
  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
    foldImmediates(MI, TII, MRI, false);
}

// Copy a MachineOperand, keeping all of its flags, but mark the copy as
// implicit.
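// This is used when shrinking V_CNDMASK_B32_e64: the explicit vcc src2 of
// the e64 form becomes an implicit use of vcc on the e32 form.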
static MachineOperand copyRegOperandAsImplicit(const MachineOperand &Orig) {
  assert(!Orig.isImplicit());
  return MachineOperand::CreateReg(Orig.getReg(),
                                   Orig.isDef(),
                                   /*isImp=*/true,
                                   Orig.isKill(),
                                   Orig.isDead(),
                                   Orig.isUndef(),
                                   Orig.isEarlyClobber(),
                                   Orig.getSubReg(),
                                   Orig.isDebug(),
                                   Orig.isInternalRead());
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4);
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of
        // materializing sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int64_t Imm = Src.getImm();
          if (isInt<32>(Imm) && !TII->isInlineConstant(Src, 4)) {
            int32_t ReverseImm =
                reverseBits<int32_t>(static_cast<int32_t>(Imm));
            if (ReverseImm >= -16 && ReverseImm <= 64) {
              MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
              Src.setImm(ReverseImm);
              continue;
            }
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how
      // long to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. an operand of 0 means wait 1 cycle. Convert both to cycles
        // and then convert back after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use,
      // and the operand folding pass is unaware if the immediate will be
      // free since it won't know if the src == dest constraint will end up
      // being satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand &Dest = MI.getOperand(0);
        const MachineOperand &Src0 = MI.getOperand(1);
        const MachineOperand &Src1 = MI.getOperand(2);

        // FIXME: This could work better if hints worked with subregisters.
        // If we have a vector add of a constant, we usually don't get the
        // correct allocation due to the subregister usage.
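        //
        // S_ADDK_I32 / S_MULK_I32 tie the destination to src0, so the
        // shrink below is only possible once the allocator has assigned
        // both the same register; hint it toward doing so. Illustrative
        // example:
        //   s_add_i32 s0, s0, 0x4321    ; 8 bytes (32-bit literal)
        //   =>
        //   s_addk_i32 s0, 0x4321       ; 4 bytes (simm16)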
        if (TargetRegisterInfo::isVirtualRegister(Dest.getReg()) &&
            Src0.isReg()) {
          MRI.setRegAllocationHint(Dest.getReg(), 0, Src0.getReg());
          continue;
        }

        if (Src0.isReg() && Src0.getReg() == Dest.getReg()) {
          if (Src1.isImm() && isKImmOperand(TII, Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && isKImmOperand(TII, Src))
          MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that
      // had a 32-bit encoding and then commuted it to an instruction that
      // did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. However,
          // we can't force them to use VCC here, because it is only a single
          // register and cannot accommodate sequences which would require
          // multiple copies of VCC, e.g.
          // S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we
          // provide a hint to the register allocator to use VCC and then we
          // will run this pass again after RA and shrink it if it outputs
          // to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for
        // VOPC instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction.
      DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit
      // $vdst. For VOPC instructions, this is replaced by an implicit def
      // of vcc.
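      // getNamedOperandIdx returns -1 when the e32 opcode has no explicit
      // $vdst operand; among the opcodes reaching this point, that is only
      // the case for VOPC comparisons, whose result goes to vcc, as the
      // assert below checks.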
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.addOperand(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      const MachineOperand *Src2 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src2);
      if (Src2) {
        int Op32Src2Idx =
            AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.addOperand(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc.
          assert(Src2->getReg() == AMDGPU::VCC &&
                 "Unexpected missing register operand");
          Inst32.addOperand(copyRegOperandAsImplicit(*Src2));
        }
      }

      ++NumInstructionsShrunk;
      MI.eraseFromParent();

      foldImmediates(*Inst32, TII, MRI);
      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}