//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  void shrinkMIMG(MachineInstr &MI);

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
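///
/// For example (illustrative MIR, not taken from a real test):
///   %1:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
///   %2:vgpr_32 = V_ADD_F32_e32 %1, %0, implicit $exec
/// becomes
///   %2:vgpr_32 = V_ADD_F32_e32 123, %0, implicit $exec
/// and the now-dead V_MOV_B32 is erased.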
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        } else if (MovSrc.isGlobal()) {
          Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                          MovSrc.getTargetFlags());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
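/// For example, the sign-bit constant 0x80000000 is not inlinable, but its bit
/// reverse is 1, which is; callers can then materialize the value with a
/// bitreverse of the inline immediate instead of a 4-byte literal.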
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

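// Try to shrink a 32-bit scalar compare to the 16-bit-immediate SOPK form,
// e.g. (illustrative):
//   s_cmp_eq_u32 s0, 0x3039  ->  s_cmpk_eq_u32 s0, 0x3039
// which drops the 32-bit literal dword from the encoding.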
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
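// For example (illustrative), an NSA image_sample whose address registers
// happen to be allocated contiguously as v3, v4, v5 can switch to the
// sequential encoding with a single vaddr starting at v3, dropping the extra
// NSA address dwords from the instruction encoding.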
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
    return;

  MachineFunction *MF = MI.getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords <= 8) {
    RC = &AMDGPU::VReg_256RegClass;
    NewAddrDwords = 8;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());

    if (i == 0) {
      VgprBase = Vgpr;
    } else if (VgprBase + i != Vgpr)
      return;

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for an implicit tied operand - one may be present if TFE is
  // enabled.
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
                            Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
    MI.RemoveOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrDwords - 1));
  }
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator.
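///
/// Examples (illustrative):
///   s_and_b32 s0, s0, 0xffffdfff  ->  s_bitset0_b32 s0, 13
///   s_or_b32  s0, s0, 0x00040000  ->  s_bitset1_b32 s0, 18
///   s_and_b32 s0, s1, 0xffffffc0  ->  s_andn2_b32 s0, s1, 63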
static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
                                MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII,
                                MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countTrailingOnes(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countTrailingZeros(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if ((Opc == AMDGPU::S_ANDN2_B32 || Opc == AMDGPU::S_ORN2_B32) &&
      SrcImm == Src0) {
    if (!TII->commuteInstruction(MI, false, 1, 2))
      NewImm = 0;
  }

  if (NewImm != 0) {
    if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
      MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return true;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), false);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                          unsigned Reg, unsigned SubReg,
                          const SIRegisterInfo &TRI) {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Register::isPhysicalRegister(Reg) &&
        Register::isPhysicalRegister(MO.getReg())) {
      if (TRI.regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

static bool instReadsReg(const MachineInstr *MI,
                         unsigned Reg, unsigned SubReg,
                         const SIRegisterInfo &TRI) {
  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
}

static bool instModifiesReg(const MachineInstr *MI,
                            unsigned Reg, unsigned SubReg,
                            const SIRegisterInfo &TRI) {
  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
}

static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
    if (Register::isPhysicalRegister(Reg)) {
      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
    } else {
      Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns the next valid instruction pointer if it was able to create
// v_swap_b32.
//
// This shall not be done too early, so as not to prevent possible folding
// which may remove the matched moves; it should preferably be done before RA
// to release saved registers, and also possibly after RA, which can insert
// copies too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although the requirements match the pass placement and it reduces code size
// too.
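//
// For multi-dword moves the rewrite emits one v_swap_b32 per 32-bit channel,
// e.g. (illustrative) swapping v[0:1] with v[2:3] produces:
//   v_swap_b32 v0, v2
//   v_swap_b32 v1, v3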
static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
                               const SIInstrInfo *TII) {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  if (!TRI.isVGPR(MRI, X))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg() ||
        MovY->getOperand(1).getReg() != T ||
        MovY->getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI.isVGPR(MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub, TRI) ||
          instModifiesReg(&*I, Y, Ysub, TRI) ||
          instModifiesReg(&*I, T, Tsub, TRI) ||
          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
      BuildMI(*MovT.getParent(), MovX->getIterator(), MovT.getDebugLoc(),
              TII->get(AMDGPU::V_SWAP_B32))
        .addDef(X1.Reg, 0, X1.SubReg)
        .addDef(Y1.Reg, 0, Y1.SubReg)
        .addReg(Y1.Reg, 0, Y1.SubReg)
        .addReg(X1.Reg, 0, X1.SubReg).getInstr();
    }
    MovX->eraseFromParent();
    MovY->eraseFromParent();
    MachineInstr *Next = &*std::next(MovT.getIterator());
    if (MRI.use_nodbg_empty(T))
      MovT.eraseFromParent();
    else
      Xop.setIsKill(false);

    return Next;
  }

  return nullptr;
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of
        // materializing sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                           MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          MI.getNumOperands() == 1 && // Don't merge with implicit operands
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP &&
          (*Next).getNumOperands() == 1) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free
      // since it won't know if the src == dest constraint will end up being
      // satisfied.
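      // e.g. (illustrative) when dst and src0 are the same register:
      //   s_add_i32 s0, s0, 0x1234  ->  s_addk_i32 s0, 0x1234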
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!TII->canShrink(MI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (Register::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (Register::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);
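      // Both must end up in VCC for the e32 form, which reads and writes VCC
      // implicitly, e.g. (illustrative):
      //   v_addc_u32_e64 v0, vcc, v1, v2, vcc
      //     -> v_addc_u32_e32 v0, v1, v2 (VCC operands implicit)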
      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (Register::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Register::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}