//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 //===----------------------------------------------------------------------===//
9 //
10 
11 #include "AMDGPU.h"
12 #include "AMDGPUSubtarget.h"
13 #include "SIInstrInfo.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/CodeGen/MachineFunctionPass.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/Target/TargetMachine.h"
24 
25 #define DEBUG_TYPE "si-fold-operands"
26 using namespace llvm;
27 
28 namespace {
29 
30 struct FoldCandidate {
31   MachineInstr *UseMI;
32   union {
33     MachineOperand *OpToFold;
34     uint64_t ImmToFold;
35     int FrameIndexToFold;
36   };
37   int ShrinkOpcode;
38   unsigned UseOpNo;
39   MachineOperand::MachineOperandType Kind;
40   bool Commuted;
41 
42   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
43                 bool Commuted_ = false,
44                 int ShrinkOp = -1) :
45     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
46     Kind(FoldOp->getType()),
47     Commuted(Commuted_) {
48     if (FoldOp->isImm()) {
49       ImmToFold = FoldOp->getImm();
50     } else if (FoldOp->isFI()) {
51       FrameIndexToFold = FoldOp->getIndex();
52     } else {
53       assert(FoldOp->isReg() || FoldOp->isGlobal());
54       OpToFold = FoldOp;
55     }
56   }
57 
58   bool isFI() const {
59     return Kind == MachineOperand::MO_FrameIndex;
60   }
61 
62   bool isImm() const {
63     return Kind == MachineOperand::MO_Immediate;
64   }
65 
66   bool isReg() const {
67     return Kind == MachineOperand::MO_Register;
68   }
69 
70   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
71 
72   bool isCommuted() const {
73     return Commuted;
74   }
75 
76   bool needsShrink() const {
77     return ShrinkOpcode != -1;
78   }
79 
80   int getShrinkOpcode() const {
81     return ShrinkOpcode;
82   }
83 };
84 
85 class SIFoldOperands : public MachineFunctionPass {
86 public:
87   static char ID;
88   MachineRegisterInfo *MRI;
89   const SIInstrInfo *TII;
90   const SIRegisterInfo *TRI;
91   const GCNSubtarget *ST;
92   const SIMachineFunctionInfo *MFI;
93 
94   void foldOperand(MachineOperand &OpToFold,
95                    MachineInstr *UseMI,
96                    int UseOpIdx,
97                    SmallVectorImpl<FoldCandidate> &FoldList,
98                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
99 
100   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
101 
102   const MachineOperand *isClamp(const MachineInstr &MI) const;
103   bool tryFoldClamp(MachineInstr &MI);
104 
105   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
106   bool tryFoldOMod(MachineInstr &MI);
107 
108 public:
109   SIFoldOperands() : MachineFunctionPass(ID) {
110     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
111   }
112 
113   bool runOnMachineFunction(MachineFunction &MF) override;
114 
115   StringRef getPassName() const override { return "SI Fold Operands"; }
116 
117   void getAnalysisUsage(AnalysisUsage &AU) const override {
118     AU.setPreservesCFG();
119     MachineFunctionPass::getAnalysisUsage(AU);
120   }
121 };
122 
123 } // End anonymous namespace.
124 
125 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
126                 "SI Fold Operands", false, false)
127 
128 char SIFoldOperands::ID = 0;
129 
130 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
131 
132 // Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
133 static unsigned macToMad(unsigned Opc) {
134   switch (Opc) {
135   case AMDGPU::V_MAC_F32_e64:
136     return AMDGPU::V_MAD_F32;
137   case AMDGPU::V_MAC_F16_e64:
138     return AMDGPU::V_MAD_F16;
139   case AMDGPU::V_FMAC_F32_e64:
140     return AMDGPU::V_FMA_F32;
141   case AMDGPU::V_FMAC_F16_e64:
142     return AMDGPU::V_FMA_F16_gfx9;
143   case AMDGPU::V_FMAC_LEGACY_F32_e64:
144     return AMDGPU::V_FMA_LEGACY_F32;
145   }
146   return AMDGPU::INSTRUCTION_LIST_END;
147 }
148 
149 // Wrapper around isInlineConstant that understands special cases when
150 // instruction types are replaced during operand folding.
151 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
152                                      const MachineInstr &UseMI,
153                                      unsigned OpNo,
154                                      const MachineOperand &OpToFold) {
155   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
156     return true;
157 
158   unsigned Opc = UseMI.getOpcode();
159   unsigned NewOpc = macToMad(Opc);
160   if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
161     // Special case for mac. Since this is replaced with mad when folded into
162     // src2, we need to check the legality for the final instruction.
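    // e.g. an immediate folded into src2 of V_MAC_F32_e64 is checked against
    // the V_MAD_F32 operand types, since the fold rewrites the mac into a mad.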
163     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
164     if (static_cast<int>(OpNo) == Src2Idx) {
165       const MCInstrDesc &MadDesc = TII->get(NewOpc);
166       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
167     }
168   }
169 
170   return false;
171 }
172 
// TODO: Add a heuristic for when the frame index might not fit in the
// addressing mode immediate offset, to avoid materializing it inside loops.
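// Check whether the frame index OpToFold may be folded directly into operand
// OpNo of UseMI: the vaddr of a MUBUF access, or the saddr (or the lone vaddr
// when there is no saddr) of a FLAT scratch access.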
175 static bool frameIndexMayFold(const SIInstrInfo *TII,
176                               const MachineInstr &UseMI,
177                               int OpNo,
178                               const MachineOperand &OpToFold) {
179   if (!OpToFold.isFI())
180     return false;
181 
182   if (TII->isMUBUF(UseMI))
183     return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
184                                               AMDGPU::OpName::vaddr);
185   if (!TII->isFLATScratch(UseMI))
186     return false;
187 
188   int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
189                                         AMDGPU::OpName::saddr);
190   if (OpNo == SIdx)
191     return true;
192 
193   int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
194                                         AMDGPU::OpName::vaddr);
195   return OpNo == VIdx && SIdx == -1;
196 }
197 
198 FunctionPass *llvm::createSIFoldOperandsPass() {
199   return new SIFoldOperands();
200 }
201 
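// Apply the fold described by Fold to its use instruction, rewriting the use
// operand in place as an immediate, frame index, global address, or register.
// When the fold requires shrinking (see FoldCandidate::needsShrink), the VOP3
// add/sub is rewritten to its 32-bit VCC-writing form, provided VCC is dead
// at that point. Returns true on success.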
202 static bool updateOperand(FoldCandidate &Fold,
203                           const SIInstrInfo &TII,
204                           const TargetRegisterInfo &TRI,
205                           const GCNSubtarget &ST) {
206   MachineInstr *MI = Fold.UseMI;
207   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
208   assert(Old.isReg());
209 
210   if (Fold.isImm()) {
211     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
212         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
213         AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
214                                       ST.hasInv2PiInlineImm())) {
215       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
216       // already set.
217       unsigned Opcode = MI->getOpcode();
218       int OpNo = MI->getOperandNo(&Old);
219       int ModIdx = -1;
220       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
221         ModIdx = AMDGPU::OpName::src0_modifiers;
222       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
223         ModIdx = AMDGPU::OpName::src1_modifiers;
224       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
225         ModIdx = AMDGPU::OpName::src2_modifiers;
226       assert(ModIdx != -1);
227       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
228       MachineOperand &Mod = MI->getOperand(ModIdx);
229       unsigned Val = Mod.getImm();
230       if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
        // Only apply the following transformation if that operand requires
        // a packed immediate.
233         switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
234         case AMDGPU::OPERAND_REG_IMM_V2FP16:
235         case AMDGPU::OPERAND_REG_IMM_V2INT16:
236         case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
237         case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
238           // If upper part is all zero we do not need op_sel_hi.
239           if (!isUInt<16>(Fold.ImmToFold)) {
240             if (!(Fold.ImmToFold & 0xffff)) {
241               Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
242               Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
243               Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
244               return true;
245             }
246             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
247             Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
248             return true;
249           }
250           break;
251         default:
252           break;
253         }
254       }
255     }
256   }
257 
258   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
259     MachineBasicBlock *MBB = MI->getParent();
260     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
261     if (Liveness != MachineBasicBlock::LQR_Dead) {
262       LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
263       return false;
264     }
265 
266     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
267     int Op32 = Fold.getShrinkOpcode();
268     MachineOperand &Dst0 = MI->getOperand(0);
269     MachineOperand &Dst1 = MI->getOperand(1);
270     assert(Dst0.isDef() && Dst1.isDef());
271 
272     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
273 
274     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
275     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
276 
277     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
278 
279     if (HaveNonDbgCarryUse) {
280       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
281         .addReg(AMDGPU::VCC, RegState::Kill);
282     }
283 
284     // Keep the old instruction around to avoid breaking iterators, but
285     // replace it with a dummy instruction to remove uses.
286     //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. We should track the set of foldable movs instead of looking for
    // uses when looking at a use.
290     Dst0.setReg(NewReg0);
291     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
292       MI->RemoveOperand(I);
293     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
294 
295     if (Fold.isCommuted())
296       TII.commuteInstruction(*Inst32, false);
297     return true;
298   }
299 
300   assert(!Fold.needsShrink() && "not handled");
301 
302   if (Fold.isImm()) {
303     Old.ChangeToImmediate(Fold.ImmToFold);
304     return true;
305   }
306 
307   if (Fold.isGlobal()) {
308     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
309                    Fold.OpToFold->getTargetFlags());
310     return true;
311   }
312 
313   if (Fold.isFI()) {
314     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
315     return true;
316   }
317 
318   MachineOperand *New = Fold.OpToFold;
319   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
320   Old.setIsUndef(New->isUndef());
321   return true;
322 }
323 
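// Return true if FoldList already contains a candidate that folds into MI.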
324 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
325                               const MachineInstr *MI) {
326   for (auto Candidate : FoldList) {
327     if (Candidate.UseMI == MI)
328       return true;
329   }
330   return false;
331 }
332 
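// Record a new fold candidate, unless a fold into the same operand of MI has
// already been recorded.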
333 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
334                                 MachineInstr *MI, unsigned OpNo,
335                                 MachineOperand *FoldOp, bool Commuted = false,
336                                 int ShrinkOp = -1) {
337   // Skip additional folding on the same operand.
338   for (FoldCandidate &Fold : FoldList)
339     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
340       return;
341   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
342                     << " operand " << OpNo << "\n  " << *MI << '\n');
343   FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
344 }
345 
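// Try to add a candidate that folds OpToFold into operand OpNo of MI. If the
// operand is not legal as-is, attempt to rewrite MI (mac -> mad, s_setreg ->
// its immediate form) or to commute it so the fold becomes legal. Folds that
// would give a SALU instruction a second literal constant are rejected.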
346 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
347                              MachineInstr *MI, unsigned OpNo,
348                              MachineOperand *OpToFold,
349                              const SIInstrInfo *TII) {
350   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
351     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
352     unsigned Opc = MI->getOpcode();
353     unsigned NewOpc = macToMad(Opc);
354     if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
355       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
356       // to fold the operand.
357       MI->setDesc(TII->get(NewOpc));
358       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
359       if (FoldAsMAD) {
360         MI->untieRegOperand(OpNo);
361         return true;
362       }
363       MI->setDesc(TII->get(Opc));
364     }
365 
366     // Special case for s_setreg_b32
367     if (OpToFold->isImm()) {
368       unsigned ImmOpc = 0;
369       if (Opc == AMDGPU::S_SETREG_B32)
370         ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
371       else if (Opc == AMDGPU::S_SETREG_B32_mode)
372         ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
373       if (ImmOpc) {
374         MI->setDesc(TII->get(ImmOpc));
375         appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
376         return true;
377       }
378     }
379 
380     // If we are already folding into another operand of MI, then
381     // we can't commute the instruction, otherwise we risk making the
382     // other fold illegal.
383     if (isUseMIInFoldList(FoldList, MI))
384       return false;
385 
386     unsigned CommuteOpNo = OpNo;
387 
388     // Operand is not legal, so try to commute the instruction to
389     // see if this makes it possible to fold.
390     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
391     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
392     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
393 
394     if (CanCommute) {
395       if (CommuteIdx0 == OpNo)
396         CommuteOpNo = CommuteIdx1;
397       else if (CommuteIdx1 == OpNo)
398         CommuteOpNo = CommuteIdx0;
399     }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
406     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
407                        !MI->getOperand(CommuteIdx1).isReg()))
408       return false;
409 
410     if (!CanCommute ||
411         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
412       return false;
413 
414     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
415       if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
416            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
417            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
418           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
419         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
420 
421         // Verify the other operand is a VGPR, otherwise we would violate the
422         // constant bus restriction.
423         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
424         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
425         if (!OtherOp.isReg() ||
426             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
427           return false;
428 
429         assert(MI->getOperand(1).isDef());
430 
431         // Make sure to get the 32-bit version of the commuted opcode.
432         unsigned MaybeCommutedOpc = MI->getOpcode();
433         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
434 
435         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
436         return true;
437       }
438 
439       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
440       return false;
441     }
442 
443     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
444     return true;
445   }
446 
447   // Check the case where we might introduce a second constant operand to a
448   // scalar instruction
449   if (TII->isSALU(MI->getOpcode())) {
450     const MCInstrDesc &InstDesc = MI->getDesc();
451     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
452     const SIRegisterInfo &SRI = TII->getRegisterInfo();
453 
454     // Fine if the operand can be encoded as an inline constant
455     if (OpToFold->isImm()) {
456       if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
457           !TII->isInlineConstant(*OpToFold, OpInfo)) {
458         // Otherwise check for another constant
459         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
460           auto &Op = MI->getOperand(i);
461           if (OpNo != i &&
462               TII->isLiteralConstantLike(Op, OpInfo)) {
463             return false;
464           }
465         }
466       }
467     }
468   }
469 
470   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
471   return true;
472 }
473 
474 // If the use operand doesn't care about the value, this may be an operand only
475 // used for register indexing, in which case it is unsafe to fold.
476 static bool isUseSafeToFold(const SIInstrInfo *TII,
477                             const MachineInstr &MI,
478                             const MachineOperand &UseMO) {
479   if (UseMO.isUndef() || TII->isSDWA(MI))
480     return false;
481 
482   switch (MI.getOpcode()) {
483   case AMDGPU::V_MOV_B32_e32:
484   case AMDGPU::V_MOV_B32_e64:
485   case AMDGPU::V_MOV_B64_PSEUDO:
486     // Do not fold into an indirect mov.
487     return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
488   }
489 
490   return true;
491   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
492 }
493 
// Find a def of the UseReg, check if it is a reg_sequence and find the
// initializer for each subreg, tracing each back to a foldable inline
// immediate if possible. Returns true on success.
497 static bool getRegSeqInit(
498     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
499     Register UseReg, uint8_t OpTy,
500     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
501   MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
502   if (!Def || !Def->isRegSequence())
503     return false;
504 
505   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
506     MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());
508 
509     for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
510          SubDef && Sub->isReg() && !Sub->getSubReg() &&
511          TII->isFoldableCopy(*SubDef);
512          SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
513       MachineOperand *Op = &SubDef->getOperand(1);
514       if (Op->isImm()) {
515         if (TII->isInlineConstant(*Op, OpTy))
516           Sub = Op;
517         break;
518       }
519       if (!Op->isReg())
520         break;
521       Sub = Op;
522     }
523 
524     Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
525   }
526 
527   return true;
528 }
529 
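// Try to fold OpToFold into an operand of UseMI that accepts an inline AC
// (accumulator) constant. Handles a direct inline immediate as well as a
// reg_sequence whose subregisters are all initialized to the same inline
// immediate (a splat).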
530 static bool tryToFoldACImm(const SIInstrInfo *TII,
531                            const MachineOperand &OpToFold,
532                            MachineInstr *UseMI,
533                            unsigned UseOpIdx,
534                            SmallVectorImpl<FoldCandidate> &FoldList) {
535   const MCInstrDesc &Desc = UseMI->getDesc();
536   const MCOperandInfo *OpInfo = Desc.OpInfo;
537   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
538     return false;
539 
540   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
541   if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
542       OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
543     return false;
544 
545   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
546       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
547     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
548     return true;
549   }
550 
551   if (!OpToFold.isReg())
552     return false;
553 
554   Register UseReg = OpToFold.getReg();
555   if (!UseReg.isVirtual())
556     return false;
557 
558   if (llvm::any_of(FoldList, [UseMI](const FoldCandidate &FC) {
559         return FC.UseMI == UseMI;
560       }))
561     return false;
562 
563   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
564   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
565   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
566     return false;
567 
568   int32_t Imm;
569   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
570     const MachineOperand *Op = Defs[I].first;
571     if (!Op->isImm())
572       return false;
573 
574     auto SubImm = Op->getImm();
575     if (!I) {
576       Imm = SubImm;
577       if (!TII->isInlineConstant(*Op, OpTy) ||
578           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
579         return false;
580 
581       continue;
582     }
583     if (Imm != SubImm)
584       return false; // Can only fold splat constants
585   }
586 
587   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
588   return true;
589 }
590 
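// Try to fold OpToFold into the use operand UseOpIdx of UseMI. REG_SEQUENCE
// and COPY users are looked through recursively; successful candidates are
// appended to FoldList, and copies converted into movs are recorded in
// CopiesToReplace so their implicit operands can be added afterwards.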
591 void SIFoldOperands::foldOperand(
592   MachineOperand &OpToFold,
593   MachineInstr *UseMI,
594   int UseOpIdx,
595   SmallVectorImpl<FoldCandidate> &FoldList,
596   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
597   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
598 
599   if (!isUseSafeToFold(TII, *UseMI, UseOp))
600     return;
601 
602   // FIXME: Fold operands with subregs.
603   if (UseOp.isReg() && OpToFold.isReg()) {
604     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
605       return;
606   }
607 
608   // Special case for REG_SEQUENCE: We can't fold literals into
609   // REG_SEQUENCE instructions, so we have to fold them into the
610   // uses of REG_SEQUENCE.
611   if (UseMI->isRegSequence()) {
612     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
613     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
614 
615     MachineRegisterInfo::use_nodbg_iterator Next;
616     for (MachineRegisterInfo::use_nodbg_iterator
617            RSUse = MRI->use_nodbg_begin(RegSeqDstReg), RSE = MRI->use_nodbg_end();
618          RSUse != RSE; RSUse = Next) {
619       Next = std::next(RSUse);
620 
621       MachineInstr *RSUseMI = RSUse->getParent();
622 
623       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
624                          RSUse.getOperandNo(), FoldList))
625         continue;
626 
627       if (RSUse->getSubReg() != RegSeqDstSubReg)
628         continue;
629 
630       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
631                   CopiesToReplace);
632     }
633 
634     return;
635   }
636 
637   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
638     return;
639 
640   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
641     // Sanity check that this is a stack access.
642     // FIXME: Should probably use stack pseudos before frame lowering.
643 
644     if (TII->isMUBUF(*UseMI)) {
645       if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
646           MFI->getScratchRSrcReg())
647         return;
648 
649       // Ensure this is either relative to the current frame or the current
650       // wave.
651       MachineOperand &SOff =
652           *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
653       if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
654           (!SOff.isImm() || SOff.getImm() != 0))
655         return;
656 
657       // If this is relative to the current wave, update it to be relative to
658       // the current frame.
659       if (SOff.isImm())
660         SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
661     }
662 
663     // A frame index will resolve to a positive constant, so it should always be
664     // safe to fold the addressing mode, even pre-GFX9.
665     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
666 
667     if (TII->isFLATScratch(*UseMI) &&
668         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
669                                    AMDGPU::OpName::vaddr) != -1) {
670       unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
671       UseMI->setDesc(TII->get(NewOpc));
672     }
673 
674     return;
675   }
676 
677   bool FoldingImmLike =
678       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
679 
680   if (FoldingImmLike && UseMI->isCopy()) {
681     Register DestReg = UseMI->getOperand(0).getReg();
682     Register SrcReg = UseMI->getOperand(1).getReg();
683     assert(SrcReg.isVirtual());
684 
685     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
686 
    // Don't fold into a copy to a physical register with the same class.
    // Doing so would interfere with the register coalescer's logic, which
    // avoids redundant initializations.
690     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
691       return;
692 
693     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
694     if (!DestReg.isPhysical()) {
695       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
696         MachineRegisterInfo::use_nodbg_iterator NextUse;
697         SmallVector<FoldCandidate, 4> CopyUses;
698         for (MachineRegisterInfo::use_nodbg_iterator Use = MRI->use_nodbg_begin(DestReg),
699                E = MRI->use_nodbg_end();
700              Use != E; Use = NextUse) {
701           NextUse = std::next(Use);
702           // There's no point trying to fold into an implicit operand.
703           if (Use->isImplicit())
704             continue;
705 
706           FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
707                                            &UseMI->getOperand(1));
708           CopyUses.push_back(FC);
709         }
710         for (auto &F : CopyUses) {
711           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
712         }
713       }
714 
715       if (DestRC == &AMDGPU::AGPR_32RegClass &&
716           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
717         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
718         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
719         CopiesToReplace.push_back(UseMI);
720         return;
721       }
722     }
723 
724     // In order to fold immediates into copies, we need to change the
725     // copy to a MOV.
726 
727     unsigned MovOp = TII->getMovOpcode(DestRC);
728     if (MovOp == AMDGPU::COPY)
729       return;
730 
731     UseMI->setDesc(TII->get(MovOp));
732     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
733     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
734     while (ImpOpI != ImpOpE) {
735       MachineInstr::mop_iterator Tmp = ImpOpI;
736       ImpOpI++;
737       UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
738     }
739     CopiesToReplace.push_back(UseMI);
740   } else {
741     if (UseMI->isCopy() && OpToFold.isReg() &&
742         UseMI->getOperand(0).getReg().isVirtual() &&
743         !UseMI->getOperand(1).getSubReg()) {
744       LLVM_DEBUG(dbgs() << "Folding " << OpToFold
745                         << "\n into " << *UseMI << '\n');
746       unsigned Size = TII->getOpSize(*UseMI, 1);
747       Register UseReg = OpToFold.getReg();
748       UseMI->getOperand(1).setReg(UseReg);
749       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
750       UseMI->getOperand(1).setIsKill(false);
751       CopiesToReplace.push_back(UseMI);
752       OpToFold.setIsKill(false);
753 
      // It is tricky to store a value into an AGPR: v_accvgpr_write_b32 can
      // only accept a VGPR or an inline immediate. Recreate the reg_sequence
      // with its initializers right here, so we rematerialize immediates and
      // avoid copies via different reg classes.
758       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
759       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
760           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
761                         *MRI)) {
762         const DebugLoc &DL = UseMI->getDebugLoc();
763         MachineBasicBlock &MBB = *UseMI->getParent();
764 
765         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
766         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
767           UseMI->RemoveOperand(I);
768 
769         MachineInstrBuilder B(*MBB.getParent(), UseMI);
770         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
771         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
772         for (unsigned I = 0; I < Size / 4; ++I) {
773           MachineOperand *Def = Defs[I].first;
774           TargetInstrInfo::RegSubRegPair CopyToVGPR;
775           if (Def->isImm() &&
776               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
777             int64_t Imm = Def->getImm();
778 
779             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
780             BuildMI(MBB, UseMI, DL,
781                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
782             B.addReg(Tmp);
783           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
784             auto Src = getRegSubRegPair(*Def);
785             Def->setIsKill(false);
786             if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers;
              // they must be copied. Better to do it here, before
              // copyPhysReg() creates several reads to do the
              // AGPR->VGPR->AGPR copy.
790               CopyToVGPR = Src;
791             } else {
792               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
793                        Src.SubReg);
794             }
795           } else {
796             assert(Def->isReg());
797             Def->setIsKill(false);
798             auto Src = getRegSubRegPair(*Def);
799 
            // A direct copy from an SGPR to an AGPR is not possible. To avoid
            // copyPhysReg() later expanding this into SGPR->VGPR->AGPR
            // copies, create the copy here and track whether we already have
            // such a copy.
803             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
804               CopyToVGPR = Src;
805             } else {
806               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
807               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
808               B.addReg(Tmp);
809             }
810           }
811 
812           if (CopyToVGPR.Reg) {
813             Register Vgpr;
814             if (VGPRCopies.count(CopyToVGPR)) {
815               Vgpr = VGPRCopies[CopyToVGPR];
816             } else {
817               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
818               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
819               VGPRCopies[CopyToVGPR] = Vgpr;
820             }
821             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
822             BuildMI(MBB, UseMI, DL,
823                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
824             B.addReg(Tmp);
825           }
826 
827           B.addImm(Defs[I].second);
828         }
829         LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
830         return;
831       }
832 
833       if (Size != 4)
834         return;
835       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
836           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
837         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
838       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
839                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
840         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
841       return;
842     }
843 
844     unsigned UseOpc = UseMI->getOpcode();
845     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
846         (UseOpc == AMDGPU::V_READLANE_B32 &&
847          (int)UseOpIdx ==
848          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
849       // %vgpr = V_MOV_B32 imm
850       // %sgpr = V_READFIRSTLANE_B32 %vgpr
851       // =>
852       // %sgpr = S_MOV_B32 imm
853       if (FoldingImmLike) {
854         if (execMayBeModifiedBeforeUse(*MRI,
855                                        UseMI->getOperand(UseOpIdx).getReg(),
856                                        *OpToFold.getParent(),
857                                        *UseMI))
858           return;
859 
860         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
861 
862         if (OpToFold.isImm())
863           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
864         else
865           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
866         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
867         return;
868       }
869 
870       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
871         if (execMayBeModifiedBeforeUse(*MRI,
872                                        UseMI->getOperand(UseOpIdx).getReg(),
873                                        *OpToFold.getParent(),
874                                        *UseMI))
875           return;
876 
877         // %vgpr = COPY %sgpr0
878         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
879         // =>
880         // %sgpr1 = COPY %sgpr0
881         UseMI->setDesc(TII->get(AMDGPU::COPY));
882         UseMI->getOperand(1).setReg(OpToFold.getReg());
883         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
884         UseMI->getOperand(1).setIsKill(false);
885         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
886         return;
887       }
888     }
889 
890     const MCInstrDesc &UseDesc = UseMI->getDesc();
891 
    // Don't fold into target-independent nodes. Target-independent opcodes
    // don't have defined register classes.
894     if (UseDesc.isVariadic() ||
895         UseOp.isImplicit() ||
896         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
897       return;
898   }
899 
900   if (!FoldingImmLike) {
901     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
902 
    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
906     return;
907   }
910   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
911   const TargetRegisterClass *FoldRC =
912     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
913 
914   // Split 64-bit constants into 32-bits for folding.
915   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
916     Register UseReg = UseOp.getReg();
917     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
918 
919     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
920       return;
921 
922     APInt Imm(64, OpToFold.getImm());
923     if (UseOp.getSubReg() == AMDGPU::sub0) {
924       Imm = Imm.getLoBits(32);
925     } else {
926       assert(UseOp.getSubReg() == AMDGPU::sub1);
927       Imm = Imm.getHiBits(32);
928     }
929 
930     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
931     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
932     return;
933   }
937   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
938 }
939 
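// Evaluate a 32-bit bitwise or shift opcode with both source values known,
// storing the folded value in Result. Returns false for opcodes this does not
// know how to evaluate.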
940 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
941                                   uint32_t LHS, uint32_t RHS) {
942   switch (Opcode) {
943   case AMDGPU::V_AND_B32_e64:
944   case AMDGPU::V_AND_B32_e32:
945   case AMDGPU::S_AND_B32:
946     Result = LHS & RHS;
947     return true;
948   case AMDGPU::V_OR_B32_e64:
949   case AMDGPU::V_OR_B32_e32:
950   case AMDGPU::S_OR_B32:
951     Result = LHS | RHS;
952     return true;
953   case AMDGPU::V_XOR_B32_e64:
954   case AMDGPU::V_XOR_B32_e32:
955   case AMDGPU::S_XOR_B32:
956     Result = LHS ^ RHS;
957     return true;
958   case AMDGPU::S_XNOR_B32:
959     Result = ~(LHS ^ RHS);
960     return true;
961   case AMDGPU::S_NAND_B32:
962     Result = ~(LHS & RHS);
963     return true;
964   case AMDGPU::S_NOR_B32:
965     Result = ~(LHS | RHS);
966     return true;
967   case AMDGPU::S_ANDN2_B32:
968     Result = LHS & ~RHS;
969     return true;
970   case AMDGPU::S_ORN2_B32:
971     Result = LHS | ~RHS;
972     return true;
973   case AMDGPU::V_LSHL_B32_e64:
974   case AMDGPU::V_LSHL_B32_e32:
975   case AMDGPU::S_LSHL_B32:
976     // The instruction ignores the high bits for out of bounds shifts.
977     Result = LHS << (RHS & 31);
978     return true;
979   case AMDGPU::V_LSHLREV_B32_e64:
980   case AMDGPU::V_LSHLREV_B32_e32:
981     Result = RHS << (LHS & 31);
982     return true;
983   case AMDGPU::V_LSHR_B32_e64:
984   case AMDGPU::V_LSHR_B32_e32:
985   case AMDGPU::S_LSHR_B32:
986     Result = LHS >> (RHS & 31);
987     return true;
988   case AMDGPU::V_LSHRREV_B32_e64:
989   case AMDGPU::V_LSHRREV_B32_e32:
990     Result = RHS >> (LHS & 31);
991     return true;
992   case AMDGPU::V_ASHR_I32_e64:
993   case AMDGPU::V_ASHR_I32_e32:
994   case AMDGPU::S_ASHR_I32:
995     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
996     return true;
997   case AMDGPU::V_ASHRREV_I32_e64:
998   case AMDGPU::V_ASHRREV_I32_e32:
999     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
1000     return true;
1001   default:
1002     return false;
1003   }
1004 }
1005 
1006 static unsigned getMovOpc(bool IsScalar) {
1007   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1008 }
1009 
1010 /// Remove any leftover implicit operands from mutating the instruction. e.g.
1011 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
1012 /// anymore.
1013 static void stripExtraCopyOperands(MachineInstr &MI) {
1014   const MCInstrDesc &Desc = MI.getDesc();
1015   unsigned NumOps = Desc.getNumOperands() +
1016                     Desc.getNumImplicitUses() +
1017                     Desc.getNumImplicitDefs();
1018 
1019   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
1020     MI.RemoveOperand(I);
1021 }
1022 
1023 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1024   MI.setDesc(NewDesc);
1025   stripExtraCopyOperands(MI);
1026 }
1027 
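// If Op is a virtual register whose defining instruction is a move-immediate,
// return that immediate source operand; otherwise return Op unchanged.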
1028 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
1029                                                MachineOperand &Op) {
1030   if (Op.isReg()) {
1031     // If this has a subregister, it obviously is a register source.
1032     if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
1033       return &Op;
1034 
1035     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
1036     if (Def && Def->isMoveImmediate()) {
1037       MachineOperand &ImmSrc = Def->getOperand(1);
1038       if (ImmSrc.isImm())
1039         return &ImmSrc;
1040     }
1041   }
1042 
1043   return &Op;
1044 }
1045 
1046 // Try to simplify operations with a constant that may appear after instruction
1047 // selection.
1048 // TODO: See if a frame index with a fixed offset can fold.
1049 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
1050                               const SIInstrInfo *TII,
1051                               MachineInstr *MI,
1052                               MachineOperand *ImmOp) {
1053   unsigned Opc = MI->getOpcode();
1054   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1055       Opc == AMDGPU::S_NOT_B32) {
1056     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
1057     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1058     return true;
1059   }
1060 
1061   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1062   if (Src1Idx == -1)
1063     return false;
1064 
1065   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1066   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
1067   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
1068 
1069   if (!Src0->isImm() && !Src1->isImm())
1070     return false;
1071 
1072   // and k0, k1 -> v_mov_b32 (k0 & k1)
1073   // or k0, k1 -> v_mov_b32 (k0 | k1)
1074   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1075   if (Src0->isImm() && Src1->isImm()) {
1076     int32_t NewImm;
1077     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
1078       return false;
1079 
1080     const SIRegisterInfo &TRI = TII->getRegisterInfo();
1081     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
1082 
    // Be careful to change the right operand; src0 may belong to a different
    // instruction.
1085     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1086     MI->RemoveOperand(Src1Idx);
1087     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1088     return true;
1089   }
1090 
1091   if (!MI->isCommutable())
1092     return false;
1093 
1094   if (Src0->isImm() && !Src1->isImm()) {
1095     std::swap(Src0, Src1);
1096     std::swap(Src0Idx, Src1Idx);
1097   }
1098 
1099   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
1100   if (Opc == AMDGPU::V_OR_B32_e64 ||
1101       Opc == AMDGPU::V_OR_B32_e32 ||
1102       Opc == AMDGPU::S_OR_B32) {
1103     if (Src1Val == 0) {
1104       // y = or x, 0 => y = copy x
1105       MI->RemoveOperand(Src1Idx);
1106       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1107     } else if (Src1Val == -1) {
1108       // y = or x, -1 => y = v_mov_b32 -1
1109       MI->RemoveOperand(Src1Idx);
1110       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1111     } else
1112       return false;
1113 
1114     return true;
1115   }
1116 
1117   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
1118       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
1119       MI->getOpcode() == AMDGPU::S_AND_B32) {
1120     if (Src1Val == 0) {
1121       // y = and x, 0 => y = v_mov_b32 0
1122       MI->RemoveOperand(Src0Idx);
1123       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1124     } else if (Src1Val == -1) {
1125       // y = and x, -1 => y = copy x
1126       MI->RemoveOperand(Src1Idx);
1127       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1128       stripExtraCopyOperands(*MI);
1129     } else
1130       return false;
1131 
1132     return true;
1133   }
1134 
1135   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
1136       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
1137       MI->getOpcode() == AMDGPU::S_XOR_B32) {
1138     if (Src1Val == 0) {
1139       // y = xor x, 0 => y = copy x
1140       MI->RemoveOperand(Src1Idx);
1141       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1142       return true;
1143     }
1144   }
1145 
1146   return false;
1147 }
1148 
1149 // Try to fold an instruction into a simpler one
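// (currently a v_cndmask whose sources are identical and unmodified, which
// degenerates into a copy or mov).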
1150 static bool tryFoldInst(const SIInstrInfo *TII,
1151                         MachineInstr *MI) {
1152   unsigned Opc = MI->getOpcode();
1153 
1154   if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
1155       Opc == AMDGPU::V_CNDMASK_B32_e64    ||
1156       Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
1157     const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
1158     const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
1159     int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1160     int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1161     if (Src1->isIdenticalTo(*Src0) &&
1162         (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
1163         (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
1164       LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
1165       auto &NewDesc =
1166           TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1167       int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1168       if (Src2Idx != -1)
1169         MI->RemoveOperand(Src2Idx);
1170       MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1171       if (Src1ModIdx != -1)
1172         MI->RemoveOperand(Src1ModIdx);
1173       if (Src0ModIdx != -1)
1174         MI->RemoveOperand(Src0ModIdx);
1175       mutateCopyOp(*MI, NewDesc);
1176       LLVM_DEBUG(dbgs() << *MI << '\n');
1177       return true;
1178     }
1179   }
1180 
1181   return false;
1182 }
1183 
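// Fold the source operand OpToFold of the foldable copy/mov MI into all uses
// of MI's destination register, constant folding or simplifying users where
// possible along the way.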
1184 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
1185                                      MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
1189   SmallVector<MachineInstr *, 4> CopiesToReplace;
1190   SmallVector<FoldCandidate, 4> FoldList;
1191   MachineOperand &Dst = MI.getOperand(0);
1192 
1193   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1194   if (FoldingImm) {
1195     unsigned NumLiteralUses = 0;
1196     MachineOperand *NonInlineUse = nullptr;
1197     int NonInlineUseOpNo = -1;
1198 
1199     MachineRegisterInfo::use_nodbg_iterator NextUse;
1200     for (MachineRegisterInfo::use_nodbg_iterator
1201            Use = MRI->use_nodbg_begin(Dst.getReg()), E = MRI->use_nodbg_end();
1202          Use != E; Use = NextUse) {
1203       NextUse = std::next(Use);
1204       MachineInstr *UseMI = Use->getParent();
1205       unsigned OpNo = Use.getOperandNo();
1206 
1207       // Folding the immediate may reveal operations that can be constant
1208       // folded or replaced with a copy. This can happen for example after
1209       // frame indices are lowered to constants or from splitting 64-bit
1210       // constants.
1211       //
1212       // We may also encounter cases where one or both operands are
1213       // immediates materialized into a register, which would ordinarily not
1214       // be folded due to multiple uses or operand constraints.
1215 
1216       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
1217         LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
1218 
        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we revisit the user. The
        // same constant-folded instruction could also have a second use
        // operand.
1223         NextUse = MRI->use_nodbg_begin(Dst.getReg());
1224         FoldList.clear();
1225         continue;
1226       }
1227 
1228       // Try to fold any inline immediate uses, and then only fold other
1229       // constants if they have one use.
1230       //
1231       // The legality of the inline immediate must be checked based on the use
1232       // operand, not the defining instruction, because 32-bit instructions
1233       // with 32-bit inline immediate sources may be used to materialize
1234       // constants used in 16-bit operands.
1235       //
1236       // e.g. it is unsafe to fold:
1237       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
1238       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
1239 
1240       // Folding immediates with more than one use will increase program size.
1241       // FIXME: This will also reduce register usage, which may be better
1242       // in some cases. A better heuristic is needed.
1243       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
1244         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
1245       } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
1246         foldOperand(OpToFold, UseMI, OpNo, FoldList,
1247                     CopiesToReplace);
1248       } else {
1249         if (++NumLiteralUses == 1) {
1250           NonInlineUse = &*Use;
1251           NonInlineUseOpNo = OpNo;
1252         }
1253       }
1254     }
1255 
1256     if (NumLiteralUses == 1) {
1257       MachineInstr *UseMI = NonInlineUse->getParent();
1258       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
1259     }
1260   } else {
1261     // Folding register.
1262     SmallVector <MachineRegisterInfo::use_nodbg_iterator, 4> UsesToProcess;
1263     for (MachineRegisterInfo::use_nodbg_iterator
1264            Use = MRI->use_nodbg_begin(Dst.getReg()), E = MRI->use_nodbg_end();
1265          Use != E; ++Use) {
1266       UsesToProcess.push_back(Use);
1267     }
1268     for (auto U : UsesToProcess) {
1269       MachineInstr *UseMI = U->getParent();
1270 
1271       foldOperand(OpToFold, UseMI, U.getOperandNo(),
1272         FoldList, CopiesToReplace);
1273     }
1274   }
1275 
1276   MachineFunction *MF = MI.getParent()->getParent();
1277   // Make sure we add EXEC uses to any new v_mov instructions created.
1278   for (MachineInstr *Copy : CopiesToReplace)
1279     Copy->addImplicitDefUseOperands(*MF);
1280 
1281   SmallPtrSet<MachineInstr *, 16> Folded;
1282   for (FoldCandidate &Fold : FoldList) {
1283     assert(!Fold.isReg() || Fold.OpToFold);
1284     if (Folded.count(Fold.UseMI))
1285       continue;
1286     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
1287       Register Reg = Fold.OpToFold->getReg();
1288       MachineInstr *DefMI = Fold.OpToFold->getParent();
1289       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1290           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1291         continue;
1292     }
1293     if (updateOperand(Fold, *TII, *TRI, *ST)) {
1294       // Clear kill flags.
1295       if (Fold.isReg()) {
1296         assert(Fold.OpToFold && Fold.OpToFold->isReg());
1297         // FIXME: Probably shouldn't bother trying to fold if not an
1298         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1299         // copies.
1300         MRI->clearKillFlags(Fold.OpToFold->getReg());
1301       }
1302       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1303                         << static_cast<int>(Fold.UseOpNo) << " of "
1304                         << *Fold.UseMI << '\n');
1305       if (tryFoldInst(TII, Fold.UseMI))
1306         Folded.insert(Fold.UseMI);
1307     } else if (Fold.isCommuted()) {
1308       // Restoring instruction's original operand order if fold has failed.
1309       TII->commuteInstruction(*Fold.UseMI, false);
1310     }
1311   }
1312 }
1313 
1314 // Clamp patterns are canonically selected to v_max_* instructions, so only
1315 // handle them.
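// If MI is a clamping max (v_max x, x with the clamp bit set and no other
// modifiers), return the common source operand; otherwise return nullptr.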
1316 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1317   unsigned Op = MI.getOpcode();
1318   switch (Op) {
1319   case AMDGPU::V_MAX_F32_e64:
1320   case AMDGPU::V_MAX_F16_e64:
1321   case AMDGPU::V_MAX_F64:
1322   case AMDGPU::V_PK_MAX_F16: {
1323     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1324       return nullptr;
1325 
1326     // Make sure sources are identical.
1327     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1328     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1329     if (!Src0->isReg() || !Src1->isReg() ||
1330         Src0->getReg() != Src1->getReg() ||
1331         Src0->getSubReg() != Src1->getSubReg() ||
1332         Src0->getSubReg() != AMDGPU::NoSubRegister)
1333       return nullptr;
1334 
1335     // Can't fold up if we have modifiers.
1336     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1337       return nullptr;
1338 
1339     unsigned Src0Mods
1340       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1341     unsigned Src1Mods
1342       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1343 
1344     // Having a 0 op_sel_hi would require swizzling the output in the source
1345     // instruction, which we can't do.
1346     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1347                                                       : 0u;
1348     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
1349       return nullptr;
1350     return Src0;
1351   }
1352   default:
1353     return nullptr;
1354   }
1355 }
1356 
1357 // We obviously have multiple uses in a clamp since the register is used twice
1358 // in the same instruction.
1359 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
1360   int Count = 0;
1361   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
1362        I != E; ++I) {
1363     if (++Count > 1)
1364       return false;
1365   }
1366 
1367   return true;
1368 }
1369 
1370 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
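// Fold the clamp from a clamping max (see isClamp) into the instruction that
// defines its source, then erase the max. For example, the clamp on a
// V_MAX_F32_e64 %x, %x is moved onto the V_ADD_F32 that defines %x.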
1371 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1372   const MachineOperand *ClampSrc = isClamp(MI);
1373   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
1374     return false;
1375 
1376   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1377 
1378   // The type of clamp must be compatible.
1379   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1380     return false;
1381 
1382   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1383   if (!DefClamp)
1384     return false;
1385 
1386   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
1387                     << '\n');
1388 
1389   // Clamp is applied after omod, so it is OK if omod is set.
1390   DefClamp->setImm(1);
1391   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1392   MI.eraseFromParent();
1393   return true;
1394 }
1395 
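// Map the constant multiplier of a v_mul to the corresponding output-modifier
// encoding (0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4), or NONE if it does not
// match any of them.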
1396 static int getOModValue(unsigned Opc, int64_t Val) {
1397   switch (Opc) {
1398   case AMDGPU::V_MUL_F32_e64: {
1399     switch (static_cast<uint32_t>(Val)) {
1400     case 0x3f000000: // 0.5
1401       return SIOutMods::DIV2;
1402     case 0x40000000: // 2.0
1403       return SIOutMods::MUL2;
1404     case 0x40800000: // 4.0
1405       return SIOutMods::MUL4;
1406     default:
1407       return SIOutMods::NONE;
1408     }
1409   }
1410   case AMDGPU::V_MUL_F16_e64: {
1411     switch (static_cast<uint16_t>(Val)) {
1412     case 0x3800: // 0.5
1413       return SIOutMods::DIV2;
1414     case 0x4000: // 2.0
1415       return SIOutMods::MUL2;
1416     case 0x4400: // 4.0
1417       return SIOutMods::MUL4;
1418     default:
1419       return SIOutMods::NONE;
1420     }
1421   }
1422   default:
1423     llvm_unreachable("invalid mul opcode");
1424   }
1425 }
1426 
1427 // FIXME: Does this really not support denormals with f16?
1428 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1429 // handled, so will anything other than that break?
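// If MI is a multiply or add that is equivalent to an output modifier on its
// source (x * 0.5/2.0/4.0, or x + x), return that source operand together
// with the omod value to apply; otherwise return {nullptr, SIOutMods::NONE}.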
1430 std::pair<const MachineOperand *, int>
1431 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1432   unsigned Op = MI.getOpcode();
1433   switch (Op) {
1434   case AMDGPU::V_MUL_F32_e64:
1435   case AMDGPU::V_MUL_F16_e64: {
1436     // If output denormals are enabled, omod is ignored.
1437     if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1438         (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
1439       return std::make_pair(nullptr, SIOutMods::NONE);
1440 
1441     const MachineOperand *RegOp = nullptr;
1442     const MachineOperand *ImmOp = nullptr;
1443     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1444     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1445     if (Src0->isImm()) {
1446       ImmOp = Src0;
1447       RegOp = Src1;
1448     } else if (Src1->isImm()) {
1449       ImmOp = Src1;
1450       RegOp = Src0;
1451     } else
1452       return std::make_pair(nullptr, SIOutMods::NONE);
1453 
1454     int OMod = getOModValue(Op, ImmOp->getImm());
1455     if (OMod == SIOutMods::NONE ||
1456         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1457         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1458         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1459         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1460       return std::make_pair(nullptr, SIOutMods::NONE);
1461 
1462     return std::make_pair(RegOp, OMod);
1463   }
1464   case AMDGPU::V_ADD_F32_e64:
1465   case AMDGPU::V_ADD_F16_e64: {
1466     // If output denormals are enabled, omod is ignored.
1467     if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1468         (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
1469       return std::make_pair(nullptr, SIOutMods::NONE);
1470 
1471     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1472     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1473     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1474 
1475     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1476         Src0->getSubReg() == Src1->getSubReg() &&
1477         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1478         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1479         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1480         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1481       return std::make_pair(Src0, SIOutMods::MUL2);
1482 
1483     return std::make_pair(nullptr, SIOutMods::NONE);
1484   }
1485   default:
1486     return std::make_pair(nullptr, SIOutMods::NONE);
1487   }
1488 }
1489 
1490 // FIXME: Does this need to check IEEE bit on function?
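// Fold an omod-equivalent multiply or add (see isOMod) into the omod field of
// the instruction defining its source, then erase MI.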
1491 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1492   const MachineOperand *RegOp;
1493   int OMod;
1494   std::tie(RegOp, OMod) = isOMod(MI);
1495   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1496       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1497       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
1498     return false;
1499 
1500   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1501   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1502   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1503     return false;
1504 
1505   // Clamp is applied after omod. If the source already has clamp set, don't
1506   // fold it.
1507   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1508     return false;
1509 
1510   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
1511 
1512   DefOMod->setImm(OMod);
1513   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1514   MI.eraseFromParent();
1515   return true;
1516 }
1517 
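// Walk the function in depth-first block order, folding the sources of
// foldable copies and movs into their uses and applying the clamp and omod
// peepholes, while also erasing redundant redefinitions of m0 within a block.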
1518 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1519   if (skipFunction(MF.getFunction()))
1520     return false;
1521 
1522   MRI = &MF.getRegInfo();
1523   ST = &MF.getSubtarget<GCNSubtarget>();
1524   TII = ST->getInstrInfo();
1525   TRI = &TII->getRegisterInfo();
1526   MFI = MF.getInfo<SIMachineFunctionInfo>();
1527 
1528   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1529   // correctly handle signed zeros.
1530   //
1531   // FIXME: Also need to check strictfp
1532   bool IsIEEEMode = MFI->getMode().IEEE;
1533   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1534 
1535   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1536     MachineBasicBlock::iterator I, Next;
1537 
1538     MachineOperand *CurrentKnownM0Val = nullptr;
1539     for (I = MBB->begin(); I != MBB->end(); I = Next) {
1540       Next = std::next(I);
1541       MachineInstr &MI = *I;
1542 
1543       tryFoldInst(TII, &MI);
1544 
1545       if (!TII->isFoldableCopy(MI)) {
1546         // Saw an unknown clobber of m0, so we no longer know what it is.
1547         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
1548           CurrentKnownM0Val = nullptr;
1549 
1550         // TODO: Omod might be OK if there is NSZ only on the source
1551         // instruction, and not the omod multiply.
1552         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1553             !tryFoldOMod(MI))
1554           tryFoldClamp(MI);
1555 
1556         continue;
1557       }
1558 
1559       // Specially track simple redefs of m0 to the same value in a block, so we
1560       // can erase the later ones.
1561       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1562         MachineOperand &NewM0Val = MI.getOperand(1);
1563         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1564           MI.eraseFromParent();
1565           continue;
1566         }
1567 
1568         // We aren't tracking other physical registers
1569         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
1570           nullptr : &NewM0Val;
1571         continue;
1572       }
1573 
1574       MachineOperand &OpToFold = MI.getOperand(1);
1575       bool FoldingImm =
1576           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1577 
1578       // FIXME: We could also be folding things like TargetIndexes.
1579       if (!FoldingImm && !OpToFold.isReg())
1580         continue;
1581 
1582       if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1583         continue;
1584 
      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 here:
1587       //
1588       //    %3 = COPY %vgpr0; VGPR_32:%3
1589       //    ...
1590       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1591       MachineOperand &Dst = MI.getOperand(0);
1592       if (Dst.isReg() && !Dst.getReg().isVirtual())
1593         continue;
1594 
1595       foldInstOperand(MI, OpToFold);
1596     }
1597   }
1598   return true;
1599 }
1600