//=======- GCNDPPCombine.cpp - optimization for DPP instructions ---==========//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass combines a V_MOV_B32_dpp instruction with its VALU uses as a DPP
// src0 operand. If any of the use instructions cannot be combined with the
// mov, the whole sequence is reverted.
//
//  $old = ...
//  $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                             dpp_controls..., $row_mask, $bank_mask, $bound_ctrl
//  $res = VALU $dpp_value [, src1]
//
// to
//
//  $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
//                  dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules:
//
// if $row_mask and $bank_mask are fully enabled (0xF) and
//    $bound_ctrl==DPP_BOUND_ZERO or $old==0
//    -> $combined_old = undef,
//       $combined_bound_ctrl = DPP_BOUND_ZERO
//
// if the VALU op is binary and
//    $bound_ctrl==DPP_BOUND_OFF and
//    $old==identity value (immediate) for the VALU op
//    -> $combined_old = src1,
//       $combined_bound_ctrl = DPP_BOUND_OFF
//
// Otherwise cancel.
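//
// For illustration, a sketch of the second rule applied to an add whose
// identity value is 0 (the register and opcode names here are schematic,
// not taken from a particular test):
//
//  $dpp_value = V_MOV_B32_dpp $old(=0), $vgpr_to_be_read_from_other_lane,
//                             dpp_controls..., 0xF, 0x7, DPP_BOUND_OFF
//  $res = V_ADD_U32_e32 $dpp_value, $src1
//
// becomes, since 0 is the identity of the add and so $src1 can stand in for
// the old value,
//
//  $res = V_ADD_U32_dpp $src1, $vgpr_to_be_read_from_other_lane, $src1,
//                       dpp_controls..., 0xF, 0x7, DPP_BOUND_OFF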
//
// The mov_dpp instruction should reside in the same BB as all its uses.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "gcn-dpp-combine"

STATISTIC(NumDPPMovsCombined, "Number of DPP moves combined.");
namespace {

class GCNDPPCombine : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const GCNSubtarget *ST;

  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;

  MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR,
                              MachineOperand *OldOpnd, bool CombBCZ,
                              bool IsShrinkable) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR, bool CombBCZ,
                              bool IsShrinkable) const;

  bool hasNoImmOrEqual(MachineInstr &MI,
                       unsigned OpndName,
                       int64_t Value,
                       int64_t Mask = -1) const;

  bool combineDPPMov(MachineInstr &MI) const;

public:
  static char ID;

  GCNDPPCombine() : MachineFunctionPass(ID) {
    initializeGCNDPPCombinePass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "GCN DPP Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
      .set(MachineFunctionProperties::Property::IsSSA);
  }

private:
  int getDPPOp(unsigned Op, bool IsShrinkable) const;
  bool isShrinkable(MachineInstr &MI) const;
};

} // end anonymous namespace

INITIALIZE_PASS(GCNDPPCombine, DEBUG_TYPE, "GCN DPP Combine", false, false)

char GCNDPPCombine::ID = 0;

char &llvm::GCNDPPCombineID = GCNDPPCombine::ID;

FunctionPass *llvm::createGCNDPPCombinePass() {
  return new GCNDPPCombine();
}
bool GCNDPPCombine::isShrinkable(MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  if (!TII->isVOP3(Op)) {
    return false;
  }
  if (!TII->hasVALU32BitEncoding(Op)) {
    LLVM_DEBUG(dbgs() << " Inst has no e32 equivalent\n");
    return false;
  }
  if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
    // Give up if there are any uses of the carry-out from instructions like
    // V_ADD_CO_U32. The shrunken form of the instruction would write it to vcc
    // instead of to a virtual register.
    if (!MRI->use_nodbg_empty(SDst->getReg()))
      return false;
  }
  // Check whether modifiers other than abs/neg are set (op_sel, for example).
  const int64_t Mask = ~(SISrcMods::ABS | SISrcMods::NEG);
  if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0)) {
    LLVM_DEBUG(dbgs() << " Inst has non-default modifiers\n");
    return false;
  }
  return true;
}

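// Maps Op to a DPP-capable opcode: prefer the 32-bit DPP encoding (going
// through the e32 form first when the VOP3 instruction is shrinkable), and
// fall back to the 64-bit VOP3 DPP encoding on subtargets that support it.
// Returns -1 if no usable DPP opcode exists.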
int GCNDPPCombine::getDPPOp(unsigned Op, bool IsShrinkable) const {
  int DPP32 = AMDGPU::getDPPOp32(Op);
  if (IsShrinkable) {
    assert(DPP32 == -1);
    int E32 = AMDGPU::getVOPe32(Op);
    DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);
  }
  if (DPP32 != -1 && TII->pseudoToMCOpcode(DPP32) != -1)
    return DPP32;
  int DPP64 = -1;
  if (ST->hasVOP3DPP())
    DPP64 = AMDGPU::getDPPOp64(Op);
  if (DPP64 != -1 && TII->pseudoToMCOpcode(DPP64) != -1)
    return DPP64;
  return -1;
}

// Tracks the register operand's definition and returns:
// 1. the immediate operand used to initialize the register, if found
// 2. nullptr if the register operand is undef
// 3. the operand itself otherwise
MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {
  auto *Def = getVRegSubRegDef(getRegSubRegPair(OldOpnd), *MRI);
  if (!Def)
    return nullptr;

  switch(Def->getOpcode()) {
  default: break;
  case AMDGPU::IMPLICIT_DEF:
    return nullptr;
  case AMDGPU::COPY:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64: {
    auto &Op1 = Def->getOperand(1);
    if (Op1.isImm())
      return &Op1;
    break;
  }
  }
  return &OldOpnd;
}

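// Builds the DPP form of OrigMI: regular operands are copied from OrigMI,
// while the DPP control fields (dpp_ctrl, row_mask, bank_mask) come from
// MovMI. CombOldVGPR and CombBCZ are the combined old value and bound_ctrl
// decided by the caller. Returns nullptr (erasing the partially built
// instruction) if any operand turns out to be illegal for the DPP encoding.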
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
                                           MachineInstr &MovMI,
                                           RegSubRegPair CombOldVGPR,
                                           bool CombBCZ,
                                           bool IsShrinkable) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  bool HasVOP3DPP = ST->hasVOP3DPP();
  auto OrigOp = OrigMI.getOpcode();
  auto DPPOp = getDPPOp(OrigOp, IsShrinkable);
  if (DPPOp == -1) {
    LLVM_DEBUG(dbgs() << " failed: no DPP opcode\n");
    return nullptr;
  }
  int OrigOpE32 = AMDGPU::getVOPe32(OrigOp);
  // The prior checks happen to guarantee a full mask for the VOPC case, but
  // only incidentally, so assert the invariant explicitly below.
  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes =
      RowMaskOpnd->getImm() == 0xF && BankMaskOpnd->getImm() == 0xF;
  (void)MaskAllLanes;
  assert(MaskAllLanes ||
         !(TII->isVOPC(DPPOp) ||
           (TII->isVOP3(DPPOp) && OrigOpE32 != -1 && TII->isVOPC(OrigOpE32))) &&
             "VOPC cannot form DPP unless mask is full");

  auto DPPInst = BuildMI(*OrigMI.getParent(), OrigMI,
                         OrigMI.getDebugLoc(), TII->get(DPPOp))
                     .setMIFlags(OrigMI.getFlags());

  bool Fail = false;
  do {
    int NumOperands = 0;
    if (auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst)) {
      DPPInst.add(*Dst);
      ++NumOperands;
    }
    if (auto *SDst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::sdst)) {
      if (TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, SDst)) {
        DPPInst.add(*SDst);
        ++NumOperands;
      }
      // If we shrunk a 64-bit VOP3B to 32 bits, just ignore the sdst.
    }

    const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
    if (OldIdx != -1) {
      assert(OldIdx == NumOperands);
      assert(isOfRegClass(
          CombOldVGPR,
          *MRI->getRegClass(
              TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg()),
          *MRI));
      auto *Def = getVRegSubRegDef(CombOldVGPR, *MRI);
      DPPInst.addReg(CombOldVGPR.Reg, Def ? 0 : RegState::Undef,
                     CombOldVGPR.SubReg);
      ++NumOperands;
    } else if (TII->isVOPC(DPPOp) || (TII->isVOP3(DPPOp) && OrigOpE32 != -1 &&
                                      TII->isVOPC(OrigOpE32))) {
      // VOPC DPP and VOPC promoted to VOP3 DPP do not have an old operand
      // because they write to SGPRs, not VGPRs.
    } else {
      // TODO: this discards MAC/FMA instructions for now; support for them
      // should be added later.
      LLVM_DEBUG(dbgs() << " failed: no old operand in DPP instruction,"
                           " TBD\n");
      Fail = true;
      break;
    }

    if (auto *Mod0 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src0_modifiers)) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src0_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod0->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod0->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src0_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
    assert(Src0);
    if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
      LLVM_DEBUG(dbgs() << " failed: src0 is illegal\n");
      Fail = true;
      break;
    }
    DPPInst.add(*Src0);
    DPPInst->getOperand(NumOperands).setIsKill(false);
    ++NumOperands;

    if (auto *Mod1 = TII->getNamedOperand(OrigMI,
                                          AMDGPU::OpName::src1_modifiers)) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src1_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod1->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod1->getImm());
      ++NumOperands;
    } else if (AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src1_modifiers) != -1) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Src1) {
      if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src1)) {
        LLVM_DEBUG(dbgs() << " failed: src1 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src1);
      ++NumOperands;
    }
    if (auto *Mod2 =
            TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2_modifiers)) {
      assert(NumOperands ==
             AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::src2_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod2->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod2->getImm());
      ++NumOperands;
    }
    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    if (Src2) {
      if (!TII->getNamedOperand(*DPPInst.getInstr(), AMDGPU::OpName::src2) ||
          !TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src2)) {
        LLVM_DEBUG(dbgs() << " failed: src2 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src2);
      ++NumOperands;
    }
    if (HasVOP3DPP) {
      auto *ClampOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::clamp);
      if (ClampOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::clamp) != -1) {
        DPPInst.addImm(ClampOpr->getImm());
      }
      auto *VdstInOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst_in);
      if (VdstInOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::vdst_in) != -1) {
        DPPInst.add(*VdstInOpr);
      }
      auto *OmodOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::omod);
      if (OmodOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::omod) != -1) {
        DPPInst.addImm(OmodOpr->getImm());
      }
      // Validate that op_sel is all zeros and op_sel_hi is all ones.
      if (auto *OpSelOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel)) {
        auto OpSel = OpSelOpr->getImm();
        if (OpSel != 0) {
          LLVM_DEBUG(dbgs() << " failed: op_sel must be zero\n");
          Fail = true;
          break;
        }
        if (AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::op_sel) != -1)
          DPPInst.addImm(OpSel);
      }
      if (auto *OpSelHiOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel_hi)) {
        auto OpSelHi = OpSelHiOpr->getImm();
        // Only VOP3P has op_sel_hi, and all VOP3P instructions have 3
        // operands, so check that all 3 op_sel_hi bits are set.
        assert(Src2 && "Expected vop3p with 3 operands");
        if (OpSelHi != 7) {
          LLVM_DEBUG(dbgs() << " failed: op_sel_hi must be all set to one\n");
          Fail = true;
          break;
        }
        if (AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::op_sel_hi) != -1)
          DPPInst.addImm(OpSelHi);
      }
      auto *NegOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_lo);
      if (NegOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::neg_lo) != -1) {
        DPPInst.addImm(NegOpr->getImm());
      }
      auto *NegHiOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_hi);
      if (NegHiOpr &&
          AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::neg_hi) != -1) {
        DPPInst.addImm(NegHiOpr->getImm());
      }
    }
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
    DPPInst.addImm(CombBCZ ? 1 : 0);
  } while (false);

  if (Fail) {
    DPPInst.getInstr()->eraseFromParent();
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << " combined: " << *DPPInst.getInstr());
  return DPPInst.getInstr();
}

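// Returns true if the immediate in OldOpnd is the identity element of the
// binary OrigMIOp, i.e. using it as one source leaves the other source
// unchanged. This is what allows src1 to stand in for the old value in the
// second combining rule described in the file header.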
static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
  assert(OldOpnd->isImm());
  switch (OrigMIOp) {
  default: break;
  case AMDGPU::V_ADD_U32_e32:
  case AMDGPU::V_ADD_U32_e64:
  case AMDGPU::V_ADD_CO_U32_e32:
  case AMDGPU::V_ADD_CO_U32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_MAX_U32_e32:
  case AMDGPU::V_MAX_U32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XOR_B32_e64:
    if (OldOpnd->getImm() == 0)
      return true;
    break;
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_MIN_U32_e32:
  case AMDGPU::V_MIN_U32_e64:
    if (static_cast<uint32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<uint32_t>::max())
      return true;
    break;
  case AMDGPU::V_MIN_I32_e32:
  case AMDGPU::V_MIN_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::max())
      return true;
    break;
  case AMDGPU::V_MAX_I32_e32:
  case AMDGPU::V_MAX_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::min())
      return true;
    break;
  case AMDGPU::V_MUL_I32_I24_e32:
  case AMDGPU::V_MUL_I32_I24_e64:
  case AMDGPU::V_MUL_U32_U24_e32:
  case AMDGPU::V_MUL_U32_U24_e64:
    if (OldOpnd->getImm() == 1)
      return true;
    break;
  }
  return false;
}

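// Wrapper around the operand-copying createDPPInst above: when the old value
// is a known immediate and bound_ctrl:0 was not chosen, verify that the
// immediate is the identity value of OrigMI's opcode and substitute src1 as
// the combined old value (second combining rule) before building the DPP
// instruction.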
MachineInstr *GCNDPPCombine::createDPPInst(
    MachineInstr &OrigMI, MachineInstr &MovMI, RegSubRegPair CombOldVGPR,
    MachineOperand *OldOpndValue, bool CombBCZ, bool IsShrinkable) const {
  assert(CombOldVGPR.Reg);
  if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (!Src1 || !Src1->isReg()) {
      LLVM_DEBUG(dbgs() << " failed: no src1 or it isn't a register\n");
      return nullptr;
    }
    if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
      LLVM_DEBUG(dbgs() << " failed: old immediate isn't an identity\n");
      return nullptr;
    }
    CombOldVGPR = getRegSubRegPair(*Src1);
    auto MovDst = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
    const TargetRegisterClass *RC = MRI->getRegClass(MovDst->getReg());
    if (!isOfRegClass(CombOldVGPR, *RC, *MRI)) {
      LLVM_DEBUG(dbgs() << " failed: src1 has wrong register class\n");
      return nullptr;
    }
  }
  return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ, IsShrinkable);
}

// Returns true if MI has no immediate operand named OpndName, or if that
// operand's value, masked with Mask, equals Value.
bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
                                    int64_t Value, int64_t Mask) const {
  auto *Imm = TII->getNamedOperand(MI, OpndName);
  if (!Imm)
    return true;

  assert(Imm->isImm());
  return (Imm->getImm() & Mask) == Value;
}

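// Attempts to combine MovMI with all of its VALU uses. It decides the
// combined old value and bound_ctrl according to the rules in the file
// header, rewrites every use into its DPP form, and rolls the whole
// transformation back if any use cannot be rewritten.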
bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
  LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);

  auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
  assert(DstOpnd && DstOpnd->isReg());
  auto DPPMovReg = DstOpnd->getReg();
  if (DPPMovReg.isPhysical()) {
    LLVM_DEBUG(dbgs() << " failed: dpp move writes physreg\n");
    return false;
  }
  if (execMayBeModifiedBeforeAnyUse(*MRI, DPPMovReg, MovMI)) {
    LLVM_DEBUG(dbgs() << " failed: EXEC mask should remain the same"
                         " for all uses\n");
    return false;
  }

  if (MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
      MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
    auto *DppCtrl = TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl);
    assert(DppCtrl && DppCtrl->isImm());
    if (!AMDGPU::isLegal64BitDPPControl(DppCtrl->getImm())) {
      LLVM_DEBUG(dbgs() << " failed: 64 bit dpp move uses unsupported"
                           " control value\n");
      // Let the mov be split into 32-bit halves; the control may then become
      // legal.
      return false;
    }
  }

  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF &&
                            BankMaskOpnd->getImm() == 0xF;

  auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
  assert(BCZOpnd && BCZOpnd->isImm());
  bool BoundCtrlZero = BCZOpnd->getImm();

  auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
  auto *SrcOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  assert(OldOpnd && OldOpnd->isReg());
  assert(SrcOpnd && SrcOpnd->isReg());
  if (OldOpnd->getReg().isPhysical() || SrcOpnd->getReg().isPhysical()) {
    LLVM_DEBUG(dbgs() << " failed: dpp move reads physreg\n");
    return false;
  }

  auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
  // OldOpndValue is either undef (IMPLICIT_DEF), an immediate, or something
  // else. We could use assert(!OldOpndValue || OldOpndValue->isImm()) here,
  // but the third option is kept to distinguish undef from a non-immediate
  // value so that the IMPLICIT_DEF instruction can be reused later.
  assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);

  bool CombBCZ = false;

  if (MaskAllLanes && BoundCtrlZero) { // [1]
    CombBCZ = true;
  } else {
    if (!OldOpndValue || !OldOpndValue->isImm()) {
      LLVM_DEBUG(dbgs() << " failed: the DPP mov isn't combinable\n");
      return false;
    }

    if (OldOpndValue->getImm() == 0) {
      if (MaskAllLanes) {
        assert(!BoundCtrlZero); // by check [1]
        CombBCZ = true;
      }
    } else if (BoundCtrlZero) {
      assert(!MaskAllLanes); // by check [1]
      LLVM_DEBUG(dbgs() <<
        " failed: old!=0 with bound_ctrl:0 and a partial mask isn't combinable\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << " old=";
             if (!OldOpndValue)
               dbgs() << "undef";
             else
               dbgs() << *OldOpndValue;
             dbgs() << ", bound_ctrl=" << CombBCZ << '\n');

  SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
  DenseMap<MachineInstr*, SmallVector<unsigned, 4>> RegSeqWithOpNos;
  auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
  // Try to reuse the previous old reg if it's undefined (IMPLICIT_DEF).
  if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
    const TargetRegisterClass *RC = MRI->getRegClass(DPPMovReg);
    CombOldVGPR = RegSubRegPair(
      MRI->createVirtualRegister(RC));
    auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
                             TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
    DPPMIs.push_back(UndefInst.getInstr());
  }

  OrigMIs.push_back(&MovMI);
  bool Rollback = true;
  SmallVector<MachineOperand*, 16> Uses;

  for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
    Uses.push_back(&Use);
  }

  while (!Uses.empty()) {
    MachineOperand *Use = Uses.pop_back_val();
    Rollback = true;

    auto &OrigMI = *Use->getParent();
    LLVM_DEBUG(dbgs() << " try: " << OrigMI);

    auto OrigOp = OrigMI.getOpcode();
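    // A use through a REG_SEQUENCE is not combined directly: record the
    // sub-register the DPP value is forwarded into and queue the users of the
    // REG_SEQUENCE result that read that sub-register.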
    if (OrigOp == AMDGPU::REG_SEQUENCE) {
      Register FwdReg = OrigMI.getOperand(0).getReg();
      unsigned FwdSubReg = 0;

      if (execMayBeModifiedBeforeAnyUse(*MRI, FwdReg, OrigMI)) {
        LLVM_DEBUG(dbgs() << " failed: EXEC mask should remain the same"
                             " for all uses\n");
        break;
      }

      unsigned OpNo, E = OrigMI.getNumOperands();
      for (OpNo = 1; OpNo < E; OpNo += 2) {
        if (OrigMI.getOperand(OpNo).getReg() == DPPMovReg) {
          FwdSubReg = OrigMI.getOperand(OpNo + 1).getImm();
          break;
        }
      }

      if (!FwdSubReg)
        break;

      for (auto &Op : MRI->use_nodbg_operands(FwdReg)) {
        if (Op.getSubReg() == FwdSubReg)
          Uses.push_back(&Op);
      }
      RegSeqWithOpNos[&OrigMI].push_back(OpNo);
      continue;
    }

    bool IsShrinkable = isShrinkable(OrigMI);
    if (!(IsShrinkable ||
          ((TII->isVOP3P(OrigOp) || TII->isVOPC(OrigOp) ||
            TII->isVOP3(OrigOp)) &&
           ST->hasVOP3DPP()) ||
          TII->isVOP1(OrigOp) || TII->isVOP2(OrigOp))) {
      LLVM_DEBUG(dbgs() << " failed: not VOP1/2/3/3P/C\n");
      break;
    }
    if (OrigMI.modifiesRegister(AMDGPU::EXEC, ST->getRegisterInfo())) {
      LLVM_DEBUG(dbgs() << " failed: can't combine v_cmpx\n");
      break;
    }

    auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
      LLVM_DEBUG(dbgs() << " failed: no suitable operands\n");
      break;
    }

    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    assert(Src0 && "Src1 without Src0?");
    if ((Use == Src0 && ((Src1 && Src1->isIdenticalTo(*Src0)) ||
                         (Src2 && Src2->isIdenticalTo(*Src0)))) ||
        (Use == Src1 && (Src1->isIdenticalTo(*Src0) ||
                         (Src2 && Src2->isIdenticalTo(*Src1))))) {
      LLVM_DEBUG(
          dbgs()
          << " " << OrigMI
          << " failed: DPP register is used more than once per instruction\n");
      break;
    }

    LLVM_DEBUG(dbgs() << " combining: " << OrigMI);
    if (Use == Src0) {
      if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
                                        OldOpndValue, CombBCZ, IsShrinkable)) {
        DPPMIs.push_back(DPPInst);
        Rollback = false;
      }
    } else {
      assert(Use == Src1 && OrigMI.isCommutable()); // by check [1]
      auto *BB = OrigMI.getParent();
      auto *NewMI = BB->getParent()->CloneMachineInstr(&OrigMI);
      BB->insert(OrigMI, NewMI);
      if (TII->commuteInstruction(*NewMI)) {
        LLVM_DEBUG(dbgs() << " commuted: " << *NewMI);
        if (auto *DPPInst =
                createDPPInst(*NewMI, MovMI, CombOldVGPR, OldOpndValue, CombBCZ,
                              IsShrinkable)) {
          DPPMIs.push_back(DPPInst);
          Rollback = false;
        }
      } else
        LLVM_DEBUG(dbgs() << " failed: cannot be commuted\n");
      NewMI->eraseFromParent();
    }
    if (Rollback)
      break;
    OrigMIs.push_back(&OrigMI);
  }

  Rollback |= !Uses.empty();

  for (auto *MI : *(Rollback ? &DPPMIs : &OrigMIs))
    MI->eraseFromParent();

  if (!Rollback) {
    for (auto &S : RegSeqWithOpNos) {
      if (MRI->use_nodbg_empty(S.first->getOperand(0).getReg())) {
        S.first->eraseFromParent();
        continue;
      }
      while (!S.second.empty())
        S.first->getOperand(S.second.pop_back_val()).setIsUndef(true);
    }
  }

  return !Rollback;
}

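// Walks each block in reverse, attempting to combine every DPP mov. A 64-bit
// DPP mov is combined as a whole when the subtarget supports 64-bit DPP;
// otherwise (or if that fails) it is expanded into two 32-bit halves and
// combining is attempted on each half separately.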
bool GCNDPPCombine::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  if (!ST->hasDPP() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = ST->getInstrInfo();

  bool Changed = false;
  for (auto &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
      if (MI.getOpcode() == AMDGPU::V_MOV_B32_dpp && combineDPPMov(MI)) {
        Changed = true;
        ++NumDPPMovsCombined;
      } else if (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
                 MI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
        if (ST->has64BitDPP() && combineDPPMov(MI)) {
          Changed = true;
          ++NumDPPMovsCombined;
        } else {
          auto Split = TII->expandMovDPP64(MI);
          for (auto M : { Split.first, Split.second }) {
            if (M && combineDPPMov(*M))
              ++NumDPPMovsCombined;
          }
          Changed = true;
        }
      }
    }
  }
  return Changed;
}