//=======- GCNDPPCombine.cpp - optimization for DPP instructions ---==========//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// The pass combines a V_MOV_B32_dpp instruction with its VALU uses as a DPP
// src0 operand. If any of the use instructions cannot be combined with the
// mov, the whole sequence is reverted.
//
//  $old = ...
//  $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                             dpp_controls..., $row_mask, $bank_mask,
//                             $bound_ctrl
//  $res = VALU $dpp_value [, src1]
//
// to
//
//  $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
//                  dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules:
//
// if $row_mask and $bank_mask are fully enabled (0xF) and
//    $bound_ctrl==DPP_BOUND_ZERO or $old==0
// -> $combined_old = undef,
//    $combined_bound_ctrl = DPP_BOUND_ZERO
//
// if the VALU op is binary and
//    $bound_ctrl==DPP_BOUND_OFF and
//    $old==identity value (immediate) for the VALU op
// -> $combined_old = src1,
//    $combined_bound_ctrl = DPP_BOUND_OFF
//
// Otherwise cancel.
//
// The mov_dpp instruction should reside in the same BB as all its uses.
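//
// For example (an illustrative sketch of the first rule; the spelling is
// approximate MIR, not verbatim pass output):
//
//   %old = ...
//   %tmp = V_MOV_B32_dpp %old, %src0, row_shl:1, row_mask:0xf,
//            bank_mask:0xf, bound_ctrl:1
//   %res = V_ADD_U32_e32 %tmp, %src1
//
// becomes, since all lanes are enabled and bound_ctrl already zeroes:
//
//   %res = V_ADD_U32_dpp undef %combined_old, %src0, %src1, row_shl:1,
//            row_mask:0xf, bank_mask:0xf, bound_ctrl:1
//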
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "gcn-dpp-combine"

STATISTIC(NumDPPMovsCombined, "Number of DPP moves combined.");

namespace {

class GCNDPPCombine : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const GCNSubtarget *ST;

  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;

  MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR,
                              MachineOperand *OldOpnd, bool CombBCZ,
                              bool IsShrinkable) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR, bool CombBCZ,
                              bool IsShrinkable) const;

  bool hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName, int64_t Value,
                       int64_t Mask = -1) const;

  bool combineDPPMov(MachineInstr &MI) const;

public:
  static char ID;

  GCNDPPCombine() : MachineFunctionPass(ID) {
    initializeGCNDPPCombinePass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "GCN DPP Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA);
  }

private:
  int getDPPOp(unsigned Op, bool IsShrinkable) const;
  bool isShrinkable(MachineInstr &MI) const;
};

} // end anonymous namespace

INITIALIZE_PASS(GCNDPPCombine, DEBUG_TYPE, "GCN DPP Combine", false, false)

char GCNDPPCombine::ID = 0;

char &llvm::GCNDPPCombineID = GCNDPPCombine::ID;

FunctionPass *llvm::createGCNDPPCombinePass() { return new GCNDPPCombine(); }

bool GCNDPPCombine::isShrinkable(MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  if (!TII->isVOP3(Op)) {
    return false;
  }
  if (!TII->hasVALU32BitEncoding(Op)) {
    LLVM_DEBUG(dbgs() << "  Inst has no e32 equivalent\n");
    return false;
  }
  if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
    // Give up if there are any uses of the carry-out from instructions like
    // V_ADD_CO_U32. The shrunken form of the instruction would write it to vcc
    // instead of to a virtual register.
    if (!MRI->use_nodbg_empty(SDst->getReg()))
      return false;
  }
  // Check whether any modifiers other than abs/neg are set (opsel, for
  // example).
  const int64_t Mask = ~(SISrcMods::ABS | SISrcMods::NEG);
  if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0)) {
    LLVM_DEBUG(dbgs() << "  Inst has non-default modifiers\n");
    return false;
  }
  return true;
}
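// Returns the DPP variant of Op, or -1 if there is none or it has no MC
// encoding on this subtarget. A sketch of the lookup (the opcodes are
// illustrative examples, not an exhaustive list): a VOP1/VOP2 op such as
// V_ADD_U32_e32 maps directly via getDPPOp32, while a shrinkable VOP3 op such
// as V_ADD_U32_e64 has no direct DPP form, so it is first narrowed to its e32
// equivalent and the lookup is retried.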
int GCNDPPCombine::getDPPOp(unsigned Op, bool IsShrinkable) const {
  auto DPP32 = AMDGPU::getDPPOp32(Op);
  if (IsShrinkable) {
    assert(DPP32 == -1);
    auto E32 = AMDGPU::getVOPe32(Op);
    DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);
  }
  return (DPP32 == -1 || TII->pseudoToMCOpcode(DPP32) == -1) ? -1 : DPP32;
}

// Tracks the register operand definition and returns:
//   1. the immediate operand used to initialize the register, if found;
//   2. nullptr, if the register operand is undef;
//   3. the operand itself otherwise.
MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {
  auto *Def = getVRegSubRegDef(getRegSubRegPair(OldOpnd), *MRI);
  if (!Def)
    return nullptr;

  switch (Def->getOpcode()) {
  default: break;
  case AMDGPU::IMPLICIT_DEF:
    return nullptr;
  case AMDGPU::COPY:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64: {
    auto &Op1 = Def->getOperand(1);
    if (Op1.isImm())
      return &Op1;
    break;
  }
  }
  return &OldOpnd;
}
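// Builds the DPP variant of OrigMI, merging in operands from MovMI, in the
// order the DPP form expects: vdst, old, [src0_modifiers,] src0,
// [src1_modifiers,] src1, [src2,] dpp_ctrl, row_mask, bank_mask, bound_ctrl.
// Returns nullptr (after erasing the half-built instruction) on any failure.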
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
                                           MachineInstr &MovMI,
                                           RegSubRegPair CombOldVGPR,
                                           bool CombBCZ,
                                           bool IsShrinkable) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  auto OrigOp = OrigMI.getOpcode();
  auto DPPOp = getDPPOp(OrigOp, IsShrinkable);
  if (DPPOp == -1) {
    LLVM_DEBUG(dbgs() << "  failed: no DPP opcode\n");
    return nullptr;
  }

  auto DPPInst = BuildMI(*OrigMI.getParent(), OrigMI,
                         OrigMI.getDebugLoc(), TII->get(DPPOp))
                     .setMIFlags(OrigMI.getFlags());

  bool Fail = false;
  do {
    auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst);
    assert(Dst);
    DPPInst.add(*Dst);
    int NumOperands = 1;

    const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
    if (OldIdx != -1) {
      assert(OldIdx == NumOperands);
      assert(isOfRegClass(
          CombOldVGPR,
          *MRI->getRegClass(
              TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg()),
          *MRI));
      auto *Def = getVRegSubRegDef(CombOldVGPR, *MRI);
      DPPInst.addReg(CombOldVGPR.Reg, Def ? 0 : RegState::Undef,
                     CombOldVGPR.SubReg);
      ++NumOperands;
    } else {
      // TODO: this discards MAC/FMA instructions for now, let's add it later
      LLVM_DEBUG(dbgs() << "  failed: no old operand in DPP instruction,"
                           " TBD\n");
      Fail = true;
      break;
    }

    if (auto *Mod0 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src0_modifiers)) {
      assert(NumOperands ==
             AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::src0_modifiers));
      assert(0LL == (Mod0->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG)));
      DPPInst.addImm(Mod0->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(
                   DPPOp, AMDGPU::OpName::src0_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
    assert(Src0);
    if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
      LLVM_DEBUG(dbgs() << "  failed: src0 is illegal\n");
      Fail = true;
      break;
    }
    DPPInst.add(*Src0);
    DPPInst->getOperand(NumOperands).setIsKill(false);
    ++NumOperands;

    if (auto *Mod1 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src1_modifiers)) {
      assert(NumOperands ==
             AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::src1_modifiers));
      assert(0LL == (Mod1->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG)));
      DPPInst.addImm(Mod1->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(
                   DPPOp, AMDGPU::OpName::src1_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    if (auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1)) {
      if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src1)) {
        LLVM_DEBUG(dbgs() << "  failed: src1 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src1);
      ++NumOperands;
    }

    if (auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2)) {
      if (!TII->getNamedOperand(*DPPInst.getInstr(), AMDGPU::OpName::src2) ||
          !TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src2)) {
        LLVM_DEBUG(dbgs() << "  failed: src2 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src2);
    }

    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
    DPPInst.addImm(CombBCZ ? 1 : 0);
  } while (false);

  if (Fail) {
    DPPInst.getInstr()->eraseFromParent();
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << "  combined: " << *DPPInst.getInstr());
  return DPPInst.getInstr();
}
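// Returns true if the immediate in OldOpnd is an identity for OrigMIOp, i.e.
// `x OP imm == x` for every x: 0 for add/subrev/or/xor/umax, all-ones for
// and/umin, INT32_MAX for smin, INT32_MIN for smax, and 1 for the 24-bit
// multiplies.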
static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
  assert(OldOpnd->isImm());
  switch (OrigMIOp) {
  default: break;
  case AMDGPU::V_ADD_U32_e32:
  case AMDGPU::V_ADD_U32_e64:
  case AMDGPU::V_ADD_CO_U32_e32:
  case AMDGPU::V_ADD_CO_U32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_MAX_U32_e32:
  case AMDGPU::V_MAX_U32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XOR_B32_e64:
    if (OldOpnd->getImm() == 0)
      return true;
    break;
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_MIN_U32_e32:
  case AMDGPU::V_MIN_U32_e64:
    if (static_cast<uint32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<uint32_t>::max())
      return true;
    break;
  case AMDGPU::V_MIN_I32_e32:
  case AMDGPU::V_MIN_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::max())
      return true;
    break;
  case AMDGPU::V_MAX_I32_e32:
  case AMDGPU::V_MAX_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::min())
      return true;
    break;
  case AMDGPU::V_MUL_I32_I24_e32:
  case AMDGPU::V_MUL_I32_I24_e64:
  case AMDGPU::V_MUL_U32_U24_e32:
  case AMDGPU::V_MUL_U32_U24_e64:
    if (OldOpnd->getImm() == 1)
      return true;
    break;
  }
  return false;
}

MachineInstr *GCNDPPCombine::createDPPInst(
    MachineInstr &OrigMI, MachineInstr &MovMI, RegSubRegPair CombOldVGPR,
    MachineOperand *OldOpndValue, bool CombBCZ, bool IsShrinkable) const {
  assert(CombOldVGPR.Reg);
  if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (!Src1 || !Src1->isReg()) {
      LLVM_DEBUG(dbgs() << "  failed: no src1 or it isn't a register\n");
      return nullptr;
    }
    if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
      LLVM_DEBUG(dbgs() << "  failed: old immediate isn't an identity\n");
      return nullptr;
    }
    CombOldVGPR = getRegSubRegPair(*Src1);
    auto MovDst = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
    const TargetRegisterClass *RC = MRI->getRegClass(MovDst->getReg());
    if (!isOfRegClass(CombOldVGPR, *RC, *MRI)) {
      LLVM_DEBUG(dbgs() << "  failed: src1 has wrong register class\n");
      return nullptr;
    }
  }
  return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ, IsShrinkable);
}

// Returns true if MI doesn't have an OpndName immediate operand, or if that
// operand's value, masked by Mask, equals Value.
bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
                                    int64_t Value, int64_t Mask) const {
  auto *Imm = TII->getNamedOperand(MI, OpndName);
  if (!Imm)
    return true;

  assert(Imm->isImm());
  return (Imm->getImm() & Mask) == Value;
}
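// Attempts to fold MovMI into all of its VALU uses, following the combining
// rules from the file header. On success returns true and erases MovMI (plus
// any REG_SEQUENCE forwarders that became dead); on failure every
// speculatively created DPP instruction is erased and nothing changes.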
bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
  LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);

  auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
  assert(DstOpnd && DstOpnd->isReg());
  auto DPPMovReg = DstOpnd->getReg();
  if (DPPMovReg.isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move writes physreg\n");
    return false;
  }
  if (execMayBeModifiedBeforeAnyUse(*MRI, DPPMovReg, MovMI)) {
    LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                         " for all uses\n");
    return false;
  }

  if (MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
      MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
    auto *DppCtrl = TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl);
    assert(DppCtrl && DppCtrl->isImm());
    if (!AMDGPU::isLegal64BitDPPControl(DppCtrl->getImm())) {
      LLVM_DEBUG(dbgs() << "  failed: 64 bit dpp move uses unsupported"
                           " control value\n");
      // Let it split; the control may then become legal.
      return false;
    }
  }

  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes =
      RowMaskOpnd->getImm() == 0xF && BankMaskOpnd->getImm() == 0xF;

  auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
  assert(BCZOpnd && BCZOpnd->isImm());
  bool BoundCtrlZero = BCZOpnd->getImm();

  auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
  auto *SrcOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  assert(OldOpnd && OldOpnd->isReg());
  assert(SrcOpnd && SrcOpnd->isReg());
  if (OldOpnd->getReg().isPhysical() || SrcOpnd->getReg().isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move reads physreg\n");
    return false;
  }

  auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
  // OldOpndValue is either undef (IMPLICIT_DEF), an immediate, or something
  // else. We could use assert(!OldOpndValue || OldOpndValue->isImm()), but
  // the third option is used to distinguish undef from a non-immediate so the
  // IMPLICIT_DEF instruction can be reused later.
  assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);

  bool CombBCZ = false;

  if (MaskAllLanes && BoundCtrlZero) { // [1]
    CombBCZ = true;
  } else {
    if (!OldOpndValue || !OldOpndValue->isImm()) {
      LLVM_DEBUG(dbgs() << "  failed: the DPP mov isn't combinable\n");
      return false;
    }

    if (OldOpndValue->getParent()->getParent() != MovMI.getParent()) {
      LLVM_DEBUG(dbgs()
                 << "  failed: old reg def and mov should be in the same BB\n");
      return false;
    }

    if (OldOpndValue->getImm() == 0) {
      if (MaskAllLanes) {
        assert(!BoundCtrlZero); // by check [1]
        CombBCZ = true;
      }
    } else if (BoundCtrlZero) {
      assert(!MaskAllLanes); // by check [1]
      LLVM_DEBUG(dbgs()
                 << "  failed: old!=0 and bctrl:0 and not all lanes isn't "
                    "combinable\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "  old=";
             if (!OldOpndValue)
               dbgs() << "undef";
             else
               dbgs() << *OldOpndValue;
             dbgs() << ", bound_ctrl=" << CombBCZ << '\n');
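  // The combine is built speculatively: newly created DPP instructions are
  // collected in DPPMIs and the instructions they replace in OrigMIs;
  // whichever list loses is erased at the end. Uses reached through a
  // REG_SEQUENCE are followed via the worklist, and the forwarding operand
  // positions are remembered so they can be marked undef on success.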
  SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
  DenseMap<MachineInstr*, SmallVector<unsigned, 4>> RegSeqWithOpNos;
  auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
  // Try to reuse the previous old reg if it is undefined (IMPLICIT_DEF).
  if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
    const TargetRegisterClass *RC = MRI->getRegClass(DPPMovReg);
    CombOldVGPR = RegSubRegPair(MRI->createVirtualRegister(RC));
    auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
                             TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
    DPPMIs.push_back(UndefInst.getInstr());
  }

  OrigMIs.push_back(&MovMI);
  bool Rollback = true;
  SmallVector<MachineOperand*, 16> Uses;

  for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
    Uses.push_back(&Use);
  }

  while (!Uses.empty()) {
    MachineOperand *Use = Uses.pop_back_val();
    Rollback = true;

    auto &OrigMI = *Use->getParent();
    LLVM_DEBUG(dbgs() << "  try: " << OrigMI);

    auto OrigOp = OrigMI.getOpcode();
    if (OrigOp == AMDGPU::REG_SEQUENCE) {
      Register FwdReg = OrigMI.getOperand(0).getReg();
      unsigned FwdSubReg = 0;

      if (execMayBeModifiedBeforeAnyUse(*MRI, FwdReg, OrigMI)) {
        LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                             " for all uses\n");
        break;
      }

      unsigned OpNo, E = OrigMI.getNumOperands();
      for (OpNo = 1; OpNo < E; OpNo += 2) {
        if (OrigMI.getOperand(OpNo).getReg() == DPPMovReg) {
          FwdSubReg = OrigMI.getOperand(OpNo + 1).getImm();
          break;
        }
      }

      if (!FwdSubReg)
        break;

      for (auto &Op : MRI->use_nodbg_operands(FwdReg)) {
        if (Op.getSubReg() == FwdSubReg)
          Uses.push_back(&Op);
      }
      RegSeqWithOpNos[&OrigMI].push_back(OpNo);
      continue;
    }

    bool IsShrinkable = isShrinkable(OrigMI);
    if (!(IsShrinkable || TII->isVOP1(OrigOp) || TII->isVOP2(OrigOp))) {
      LLVM_DEBUG(dbgs() << "  failed: not VOP1/2/3\n");
      break;
    }

    auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
      LLVM_DEBUG(dbgs() << "  failed: no suitable operands\n");
      break;
    }

    assert(Src0 && "Src1 without Src0?");
    if (Src1 && Src1->isIdenticalTo(*Src0)) {
      assert(Src1->isReg());
      LLVM_DEBUG(
          dbgs()
          << "  " << OrigMI
          << "  failed: DPP register is used more than once per instruction\n");
      break;
    }

    LLVM_DEBUG(dbgs() << "  combining: " << OrigMI);
    if (Use == Src0) {
      if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
                                        OldOpndValue, CombBCZ, IsShrinkable)) {
        DPPMIs.push_back(DPPInst);
        Rollback = false;
      }
    } else {
      assert(Use == Src1 && OrigMI.isCommutable()); // by check [1]
      auto *BB = OrigMI.getParent();
      auto *NewMI = BB->getParent()->CloneMachineInstr(&OrigMI);
      BB->insert(OrigMI, NewMI);
      if (TII->commuteInstruction(*NewMI)) {
        LLVM_DEBUG(dbgs() << "  commuted: " << *NewMI);
        if (auto *DPPInst =
                createDPPInst(*NewMI, MovMI, CombOldVGPR, OldOpndValue, CombBCZ,
                              IsShrinkable)) {
          DPPMIs.push_back(DPPInst);
          Rollback = false;
        }
      } else
        LLVM_DEBUG(dbgs() << "  failed: cannot be commuted\n");
      NewMI->eraseFromParent();
    }
    if (Rollback)
      break;
    OrigMIs.push_back(&OrigMI);
  }

  Rollback |= !Uses.empty();

  for (auto *MI : *(Rollback ? &DPPMIs : &OrigMIs))
    MI->eraseFromParent();

  if (!Rollback) {
    for (auto &S : RegSeqWithOpNos) {
      if (MRI->use_nodbg_empty(S.first->getOperand(0).getReg())) {
        S.first->eraseFromParent();
        continue;
      }
      while (!S.second.empty())
        S.first->getOperand(S.second.pop_back_val()).setIsUndef(true);
    }
  }

  return !Rollback;
}
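// Walks each block bottom-up, trying to combine every DPP mov it finds. A
// 64-bit DPP mov that cannot be combined as a whole is expanded into two
// 32-bit halves via TII->expandMovDPP64, and each half is then tried
// individually.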
bool GCNDPPCombine::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  if (!ST->hasDPP() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = ST->getInstrInfo();

  bool Changed = false;
  for (auto &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
      if (MI.getOpcode() == AMDGPU::V_MOV_B32_dpp && combineDPPMov(MI)) {
        Changed = true;
        ++NumDPPMovsCombined;
      } else if (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
                 MI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
        if (ST->has64BitDPP() && combineDPPMov(MI)) {
          Changed = true;
          ++NumDPPMovsCombined;
        } else {
          auto Split = TII->expandMovDPP64(MI);
          for (auto M : {Split.first, Split.second}) {
            if (M && combineDPPMov(*M))
              ++NumDPPMovsCombined;
          }
          Changed = true;
        }
      }
    }
  }
  return Changed;
}