//===- GCNDPPCombine.cpp - Optimization for DPP instructions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass combines a V_MOV_B32_dpp instruction with its VALU uses as a DPP
// src0 operand. If any of the use instructions cannot be combined with the
// mov, the whole sequence is reverted.
//
// $old = ...
// $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                            dpp_controls..., $row_mask, $bank_mask, $bound_ctrl
// $res = VALU $dpp_value [, src1]
//
// to
//
// $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
//                 dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules:
//
// if $row_mask and $bank_mask are fully enabled (0xF) and
//    $bound_ctrl==DPP_BOUND_ZERO or $old==0
// -> $combined_old = undef,
//    $combined_bound_ctrl = DPP_BOUND_ZERO
//
// if the VALU op is binary and
//    $bound_ctrl==DPP_BOUND_OFF and
//    $old==identity value (immediate) for the VALU op
// -> $combined_old = src1,
//    $combined_bound_ctrl = DPP_BOUND_OFF
//
// Otherwise, cancel.
//
// The mov_dpp instruction should reside in the same BB as all its uses.
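//
// For example (an illustrative sketch, written in the same schematic notation
// as above rather than exact MIR syntax):
//
//   $old = V_MOV_B32_e32 0
//   $tmp = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                        dpp_controls..., 0xF, 0xF, DPP_BOUND_OFF
//   $res = V_ADD_U32_e32 $tmp, $src1
//
// Both masks are fully enabled and $old is 0, so the first rule applies and
// the pair is combined into:
//
//   $res = V_ADD_U32_dpp undef $combined_old, $vgpr_to_be_read_from_other_lane,
//                        $src1, dpp_controls..., 0xF, 0xF, DPP_BOUND_ZERO
//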
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "gcn-dpp-combine"

STATISTIC(NumDPPMovsCombined, "Number of DPP moves combined.");

namespace {

class GCNDPPCombine : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const GCNSubtarget *ST;

  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;

  MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR,
                              MachineOperand *OldOpnd, bool CombBCZ,
                              bool IsShrinkable) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR, bool CombBCZ,
                              bool IsShrinkable) const;

  bool hasNoImmOrEqual(MachineInstr &MI,
                       unsigned OpndName,
                       int64_t Value,
                       int64_t Mask = -1) const;

  bool combineDPPMov(MachineInstr &MI) const;

public:
  static char ID;

  GCNDPPCombine() : MachineFunctionPass(ID) {
    initializeGCNDPPCombinePass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "GCN DPP Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
      .set(MachineFunctionProperties::Property::IsSSA);
  }

private:
  int getDPPOp(unsigned Op, bool IsShrinkable) const;
  bool isShrinkable(MachineInstr &MI) const;
};

} // end anonymous namespace

INITIALIZE_PASS(GCNDPPCombine, DEBUG_TYPE, "GCN DPP Combine", false, false)

char GCNDPPCombine::ID = 0;

char &llvm::GCNDPPCombineID = GCNDPPCombine::ID;

FunctionPass *llvm::createGCNDPPCombinePass() {
  return new GCNDPPCombine();
}

bool GCNDPPCombine::isShrinkable(MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  if (!TII->isVOP3(Op)) {
    return false;
  }
  if (!TII->hasVALU32BitEncoding(Op)) {
    LLVM_DEBUG(dbgs() << "  Inst hasn't e32 equivalent\n");
    return false;
  }
  if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
    // Give up if there are any uses of the carry-out from instructions like
    // V_ADD_CO_U32. The shrunken form of the instruction would write it to vcc
    // instead of to a virtual register.
    if (!MRI->use_nodbg_empty(SDst->getReg()))
      return false;
  }
  // Check whether modifiers other than abs/neg are set (e.g. opsel).
  const int64_t Mask = ~(SISrcMods::ABS | SISrcMods::NEG);
  if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0)) {
    LLVM_DEBUG(dbgs() << "  Inst has non-default modifiers\n");
    return false;
  }
  return true;
}

int GCNDPPCombine::getDPPOp(unsigned Op, bool IsShrinkable) const {
  int DPP32 = AMDGPU::getDPPOp32(Op);
  if (IsShrinkable) {
    assert(DPP32 == -1);
    int E32 = AMDGPU::getVOPe32(Op);
    DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);
  }
  if (DPP32 != -1 && TII->pseudoToMCOpcode(DPP32) != -1)
    return DPP32;
  int DPP64 = -1;
  if (ST->hasVOP3DPP())
    DPP64 = AMDGPU::getDPPOp64(Op);
  if (DPP64 != -1 && TII->pseudoToMCOpcode(DPP64) != -1)
    return DPP64;
  return -1;
}

// Tracks the register operand's definition and returns:
//   1. the immediate operand used to initialize the register, if found;
//   2. nullptr if the register operand is undef;
//   3. the operand itself otherwise.
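// For example (an illustrative sketch): if OldOpnd is defined by
//   %old = V_MOV_B32_e32 42
// the immediate operand (42) is returned; if it is defined by IMPLICIT_DEF,
// nullptr is returned; any other defining instruction yields &OldOpnd itself.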
MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {
  auto *Def = getVRegSubRegDef(getRegSubRegPair(OldOpnd), *MRI);
  if (!Def)
    return nullptr;

  switch(Def->getOpcode()) {
  default: break;
  case AMDGPU::IMPLICIT_DEF:
    return nullptr;
  case AMDGPU::COPY:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64: {
    auto &Op1 = Def->getOperand(1);
    if (Op1.isImm())
      return &Op1;
    break;
  }
  }
  return &OldOpnd;
}

MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
                                           MachineInstr &MovMI,
                                           RegSubRegPair CombOldVGPR,
                                           bool CombBCZ,
                                           bool IsShrinkable) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  bool HasVOP3DPP = ST->hasVOP3DPP();
  auto OrigOp = OrigMI.getOpcode();
  auto DPPOp = getDPPOp(OrigOp, IsShrinkable);
  if (DPPOp == -1) {
    LLVM_DEBUG(dbgs() << "  failed: no DPP opcode\n");
    return nullptr;
  }

  auto DPPInst = BuildMI(*OrigMI.getParent(), OrigMI,
                         OrigMI.getDebugLoc(), TII->get(DPPOp))
    .setMIFlags(OrigMI.getFlags());

  bool Fail = false;
  do {
    int NumOperands = 0;
    if (auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst)) {
      DPPInst.add(*Dst);
      ++NumOperands;
    }
    if (auto *SDst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::sdst)) {
      if (TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, SDst)) {
        DPPInst.add(*SDst);
        ++NumOperands;
      }
      // If we shrank a 64-bit VOP3b to 32 bits, just ignore the sdst.
    }

    int OrigOpE32 = AMDGPU::getVOPe32(OrigOp);
    const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
    if (OldIdx != -1) {
      assert(OldIdx == NumOperands);
      assert(isOfRegClass(
          CombOldVGPR,
          *MRI->getRegClass(
              TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg()),
          *MRI));
      auto *Def = getVRegSubRegDef(CombOldVGPR, *MRI);
      DPPInst.addReg(CombOldVGPR.Reg, Def ? 0 : RegState::Undef,
                     CombOldVGPR.SubReg);
      ++NumOperands;
    } else if (TII->isVOPC(DPPOp) || (TII->isVOP3(DPPOp) && OrigOpE32 != -1 &&
                                      TII->isVOPC(OrigOpE32))) {
      // VOPC DPP and VOPC promoted to VOP3 DPP do not have an old operand
      // because they write to SGPRs, not VGPRs.
    } else {
      // TODO: this discards MAC/FMA instructions for now; add support for
      // them later.
      LLVM_DEBUG(dbgs() << "  failed: no old operand in DPP instruction,"
                           " TBD\n");
      Fail = true;
      break;
    }

    if (auto *Mod0 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src0_modifiers)) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src0_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod0->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod0->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(DPPOp,
                   AMDGPU::OpName::src0_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
    assert(Src0);
    if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
      LLVM_DEBUG(dbgs() << "  failed: src0 is illegal\n");
      Fail = true;
      break;
    }
    DPPInst.add(*Src0);
    DPPInst->getOperand(NumOperands).setIsKill(false);
    ++NumOperands;

    if (auto *Mod1 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src1_modifiers)) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src1_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod1->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod1->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(DPPOp,
                   AMDGPU::OpName::src1_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Src1) {
      if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src1)) {
        LLVM_DEBUG(dbgs() << "  failed: src1 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src1);
      ++NumOperands;
    }
    if (auto *Mod2 =
            TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2_modifiers)) {
      assert(NumOperands ==
             AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::src2_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod2->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod2->getImm());
      ++NumOperands;
    }
    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    if (Src2) {
      if (!TII->getNamedOperand(*DPPInst.getInstr(), AMDGPU::OpName::src2) ||
          !TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src2)) {
        LLVM_DEBUG(dbgs() << "  failed: src2 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src2);
      ++NumOperands;
    }
    if (HasVOP3DPP) {
      auto *ClampOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::clamp);
      if (ClampOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::clamp) != -1) {
        DPPInst.addImm(ClampOpr->getImm());
      }
      auto *VdstInOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst_in);
      if (VdstInOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::vdst_in) != -1) {
        DPPInst.add(*VdstInOpr);
      }
      auto *OmodOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::omod);
      if (OmodOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::omod) != -1) {
        DPPInst.addImm(OmodOpr->getImm());
      }
      // Validate that OP_SEL is all zeros and OP_SEL_HI is all ones.
      if (auto *OpSelOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel)) {
        auto OpSel = OpSelOpr->getImm();
        if (OpSel != 0) {
          LLVM_DEBUG(dbgs() << "  failed: op_sel must be zero\n");
          Fail = true;
          break;
        }
        if (AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::op_sel) != -1)
          DPPInst.addImm(OpSel);
      }
      if (auto *OpSelHiOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel_hi)) {
        auto OpSelHi = OpSelHiOpr->getImm();
        // Only VOP3P has op_sel_hi, and all VOP3P instructions have 3
        // operands, so check that all 3 op_sel_hi bits are set.
        assert(Src2 && "Expected vop3p with 3 operands");
        if (OpSelHi != 7) {
          LLVM_DEBUG(dbgs() << "  failed: op_sel_hi must be all set to one\n");
          Fail = true;
          break;
        }
        if (AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::op_sel_hi) != -1)
          DPPInst.addImm(OpSelHi);
      }
      auto *NegOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_lo);
      if (NegOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::neg_lo) != -1) {
        DPPInst.addImm(NegOpr->getImm());
      }
      auto *NegHiOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_hi);
      if (NegHiOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::neg_hi) != -1) {
        DPPInst.addImm(NegHiOpr->getImm());
      }
    }
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
    DPPInst.addImm(CombBCZ ? 1 : 0);
  } while (false);

  if (Fail) {
    DPPInst.getInstr()->eraseFromParent();
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << "  combined:  " << *DPPInst.getInstr());
  return DPPInst.getInstr();
}

static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
  assert(OldOpnd->isImm());
  switch (OrigMIOp) {
  default: break;
  case AMDGPU::V_ADD_U32_e32:
  case AMDGPU::V_ADD_U32_e64:
  case AMDGPU::V_ADD_CO_U32_e32:
  case AMDGPU::V_ADD_CO_U32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_MAX_U32_e32:
  case AMDGPU::V_MAX_U32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XOR_B32_e64:
    if (OldOpnd->getImm() == 0)
      return true;
    break;
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_MIN_U32_e32:
  case AMDGPU::V_MIN_U32_e64:
    if (static_cast<uint32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<uint32_t>::max())
      return true;
    break;
  case AMDGPU::V_MIN_I32_e32:
  case AMDGPU::V_MIN_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::max())
      return true;
    break;
  case AMDGPU::V_MAX_I32_e32:
  case AMDGPU::V_MAX_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::min())
      return true;
    break;
  case AMDGPU::V_MUL_I32_I24_e32:
  case AMDGPU::V_MUL_I32_I24_e64:
  case AMDGPU::V_MUL_U32_U24_e32:
  case AMDGPU::V_MUL_U32_U24_e64:
    if (OldOpnd->getImm() == 1)
      return true;
    break;
  }
  return false;
}

MachineInstr *GCNDPPCombine::createDPPInst(
    MachineInstr &OrigMI, MachineInstr &MovMI, RegSubRegPair CombOldVGPR,
    MachineOperand *OldOpndValue, bool CombBCZ, bool IsShrinkable) const {
  assert(CombOldVGPR.Reg);
  if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (!Src1 || !Src1->isReg()) {
      LLVM_DEBUG(dbgs() << "  failed: no src1 or it isn't a register\n");
      return nullptr;
    }
    if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
      LLVM_DEBUG(dbgs() << "  failed: old immediate isn't an identity\n");
      return nullptr;
    }
    CombOldVGPR = getRegSubRegPair(*Src1);
    auto MovDst = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
    const TargetRegisterClass *RC = MRI->getRegClass(MovDst->getReg());
    if (!isOfRegClass(CombOldVGPR, *RC, *MRI)) {
      LLVM_DEBUG(dbgs() << "  failed: src1 has wrong register class\n");
      return nullptr;
    }
  }
  return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ, IsShrinkable);
}

// Returns true if MI does not have the OpndName immediate operand, or if the
// operand's value (after applying Mask) equals Value.
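// For example (usage sketch, mirroring the checks in isShrinkable() above):
// hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) is true when MI either has no
// clamp operand or its clamp operand is 0.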
bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
                                    int64_t Value, int64_t Mask) const {
  auto *Imm = TII->getNamedOperand(MI, OpndName);
  if (!Imm)
    return true;

  assert(Imm->isImm());
  return (Imm->getImm() & Mask) == Value;
}

bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
  LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);

  auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
  assert(DstOpnd && DstOpnd->isReg());
  auto DPPMovReg = DstOpnd->getReg();
  if (DPPMovReg.isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move writes physreg\n");
    return false;
  }
  if (execMayBeModifiedBeforeAnyUse(*MRI, DPPMovReg, MovMI)) {
    LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                         " for all uses\n");
    return false;
  }

  if (MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
      MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
    auto *DppCtrl = TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl);
    assert(DppCtrl && DppCtrl->isImm());
    if (!AMDGPU::isLegal64BitDPPControl(DppCtrl->getImm())) {
      LLVM_DEBUG(dbgs() << "  failed: 64 bit dpp move uses unsupported"
                           " control value\n");
      // Let the mov be split by expandMovDPP64; the control value may then
      // become legal.
      return false;
    }
  }

  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF &&
                            BankMaskOpnd->getImm() == 0xF;

  auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
  assert(BCZOpnd && BCZOpnd->isImm());
  bool BoundCtrlZero = BCZOpnd->getImm();

  auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
  auto *SrcOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  assert(OldOpnd && OldOpnd->isReg());
  assert(SrcOpnd && SrcOpnd->isReg());
  if (OldOpnd->getReg().isPhysical() || SrcOpnd->getReg().isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move reads physreg\n");
    return false;
  }

  auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
  // OldOpndValue is either undef (IMPLICIT_DEF), an immediate, or something
  // else. We could use assert(!OldOpndValue || OldOpndValue->isImm()) here,
  // but the third option is kept to distinguish undef from a non-immediate
  // value so the IMPLICIT_DEF instruction can be reused later.
  assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);

  bool CombBCZ = false;

  if (MaskAllLanes && BoundCtrlZero) { // [1]
    CombBCZ = true;
  } else {
    if (!OldOpndValue || !OldOpndValue->isImm()) {
      LLVM_DEBUG(dbgs() << "  failed: the DPP mov isn't combinable\n");
      return false;
    }

    if (OldOpndValue->getImm() == 0) {
      if (MaskAllLanes) {
        assert(!BoundCtrlZero); // by check [1]
        CombBCZ = true;
      }
    } else if (BoundCtrlZero) {
      assert(!MaskAllLanes); // by check [1]
      LLVM_DEBUG(dbgs() <<
        "  failed: old!=0 and bctrl:0 and not all lanes isn't combinable\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "  old=";
    if (!OldOpndValue)
      dbgs() << "undef";
    else
      dbgs() << *OldOpndValue;
    dbgs() << ", bound_ctrl=" << CombBCZ << '\n');

  SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
  DenseMap<MachineInstr*, SmallVector<unsigned, 4>> RegSeqWithOpNos;
  auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
  // Try to reuse the previous old reg if it is undefined (IMPLICIT_DEF).
  if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
    const TargetRegisterClass *RC = MRI->getRegClass(DPPMovReg);
    CombOldVGPR = RegSubRegPair(
      MRI->createVirtualRegister(RC));
    auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
                             TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
    DPPMIs.push_back(UndefInst.getInstr());
  }

  OrigMIs.push_back(&MovMI);
  bool Rollback = true;
  SmallVector<MachineOperand*, 16> Uses;

  for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
    Uses.push_back(&Use);
  }

  while (!Uses.empty()) {
    MachineOperand *Use = Uses.pop_back_val();
    Rollback = true;

    auto &OrigMI = *Use->getParent();
    LLVM_DEBUG(dbgs() << "  try: " << OrigMI);

    auto OrigOp = OrigMI.getOpcode();
    if (OrigOp == AMDGPU::REG_SEQUENCE) {
      Register FwdReg = OrigMI.getOperand(0).getReg();
      unsigned FwdSubReg = 0;

      if (execMayBeModifiedBeforeAnyUse(*MRI, FwdReg, OrigMI)) {
        LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                             " for all uses\n");
        break;
      }

      unsigned OpNo, E = OrigMI.getNumOperands();
      for (OpNo = 1; OpNo < E; OpNo += 2) {
        if (OrigMI.getOperand(OpNo).getReg() == DPPMovReg) {
          FwdSubReg = OrigMI.getOperand(OpNo + 1).getImm();
          break;
        }
      }

      if (!FwdSubReg)
        break;

      for (auto &Op : MRI->use_nodbg_operands(FwdReg)) {
        if (Op.getSubReg() == FwdSubReg)
          Uses.push_back(&Op);
      }
      RegSeqWithOpNos[&OrigMI].push_back(OpNo);
      continue;
    }

    bool IsShrinkable = isShrinkable(OrigMI);
    if (!(IsShrinkable ||
          ((TII->isVOP3P(OrigOp) || TII->isVOPC(OrigOp) ||
            TII->isVOP3(OrigOp)) &&
           ST->hasVOP3DPP()) ||
          TII->isVOP1(OrigOp) || TII->isVOP2(OrigOp))) {
      LLVM_DEBUG(dbgs() << "  failed: not VOP1/2/3/3P/C\n");
      break;
    }
    if (OrigMI.modifiesRegister(AMDGPU::EXEC, ST->getRegisterInfo())) {
      LLVM_DEBUG(dbgs() << "  failed: can't combine v_cmpx\n");
      break;
    }

    auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
      LLVM_DEBUG(dbgs() << "  failed: no suitable operands\n");
      break;
    }

    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    assert(Src0 && "Src1 without Src0?");
    if ((Use == Src0 && ((Src1 && Src1->isIdenticalTo(*Src0)) ||
                         (Src2 && Src2->isIdenticalTo(*Src0)))) ||
        (Use == Src1 && (Src1->isIdenticalTo(*Src0) ||
                         (Src2 && Src2->isIdenticalTo(*Src1))))) {
      LLVM_DEBUG(
          dbgs()
          << "  " << OrigMI
          << "  failed: DPP register is used more than once per instruction\n");
      break;
    }

    LLVM_DEBUG(dbgs() << "  combining: " << OrigMI);
    if (Use == Src0) {
      if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
                                        OldOpndValue, CombBCZ, IsShrinkable)) {
        DPPMIs.push_back(DPPInst);
        Rollback = false;
      }
    } else {
      assert(Use == Src1 && OrigMI.isCommutable()); // by check [1]
      auto *BB = OrigMI.getParent();
      auto *NewMI = BB->getParent()->CloneMachineInstr(&OrigMI);
      BB->insert(OrigMI, NewMI);
      if (TII->commuteInstruction(*NewMI)) {
        LLVM_DEBUG(dbgs() << "  commuted:  " << *NewMI);
        if (auto *DPPInst =
                createDPPInst(*NewMI, MovMI, CombOldVGPR, OldOpndValue, CombBCZ,
                              IsShrinkable)) {
          DPPMIs.push_back(DPPInst);
          Rollback = false;
        }
      } else
        LLVM_DEBUG(dbgs() << "  failed: cannot be commuted\n");
      NewMI->eraseFromParent();
    }
    if (Rollback)
      break;
    OrigMIs.push_back(&OrigMI);
  }

  Rollback |= !Uses.empty();

  for (auto *MI : *(Rollback? &DPPMIs : &OrigMIs))
    MI->eraseFromParent();

  if (!Rollback) {
    for (auto &S : RegSeqWithOpNos) {
      if (MRI->use_nodbg_empty(S.first->getOperand(0).getReg())) {
        S.first->eraseFromParent();
        continue;
      }
      while (!S.second.empty())
        S.first->getOperand(S.second.pop_back_val()).setIsUndef(true);
    }
  }

  return !Rollback;
}

bool GCNDPPCombine::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  if (!ST->hasDPP() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = ST->getInstrInfo();

  bool Changed = false;
  for (auto &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
      if (MI.getOpcode() == AMDGPU::V_MOV_B32_dpp && combineDPPMov(MI)) {
        Changed = true;
        ++NumDPPMovsCombined;
      } else if (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
                 MI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
        if (ST->has64BitDPP() && combineDPPMov(MI)) {
          Changed = true;
          ++NumDPPMovsCombined;
        } else {
          auto Split = TII->expandMovDPP64(MI);
          for (auto M : { Split.first, Split.second }) {
            if (M && combineDPPMov(*M))
              ++NumDPPMovsCombined;
          }
          Changed = true;
        }
      }
    }
  }
  return Changed;
}