//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
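//
// For example, folding an immediate into src2 of a V_MAC_F32_e64 must be
// checked against V_MAD_F32, because the mac is rewritten to a mad when the
// fold is performed. A rough sketch (operand modifiers omitted, register
// names are made up for illustration):
//   %d = V_MAC_F32_e64 %a, %b, %c     ; %c defined by a foldable move of K
//   -->
//   %d = V_MAD_F32 %a, %b, K          ; legal only if K is inline for the mad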
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(NewOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
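//
// Schematically (operand lists heavily abbreviated, for illustration only):
//   %addr:vgpr_32 = V_MOV_B32_e32 %stack.0
//   BUFFER_LOAD_DWORD_OFFEN %addr, ...     ; vaddr use of a scratch access
//   -->
//   BUFFER_LOAD_DWORD_OFFEN %stack.0, ...  ; frame index folded into vaddr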
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  if (!OpToFold.isFI())
    return false;

  if (TII->isMUBUF(UseMI))
    return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                              AMDGPU::OpName::vaddr);
  if (!TII->isFLATScratch(UseMI))
    return false;

  int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                        AMDGPU::OpName::saddr);
  if (OpNo == SIdx)
    return true;

  int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                        AMDGPU::OpName::vaddr);
  return OpNo == VIdx && SIdx == -1;
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

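  // For packed (v2f16/v2i16) operands, a 32-bit literal with a zero half or
  // two identical halves can sometimes be folded as a single 16-bit value by
  // adjusting op_sel/op_sel_hi on the source modifier, as the block below
  // does. For instance (illustrative only), a literal whose low half is zero
  // is folded as its high half with OP_SEL_0 set and OP_SEL_1 cleared.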
  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
                                      ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
        // Only apply the following transformation if that operand requires
        // a packed immediate.
        switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
        case AMDGPU::OPERAND_REG_IMM_V2FP16:
        case AMDGPU::OPERAND_REG_IMM_V2INT16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
          // If upper part is all zero we do not need op_sel_hi.
          if (!isUInt<16>(Fold.ImmToFold)) {
            if (!(Fold.ImmToFold & 0xffff)) {
              Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
              Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
              Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
              return true;
            }
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
            return true;
          }
          break;
        default:
          break;
        }
      }
    }
  }

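  // For VOP3 carry-out instructions like V_ADD_CO_U32_e64 the fold may be
  // combined with shrinking to the VOP2 (e32) form, which implicitly defines
  // VCC. Roughly (schematic, operands abbreviated):
  //   %d, %carry = V_ADD_CO_U32_e64 %a, %b
  //   -->
  //   %d = V_ADD_CO_U32_e32 K, %b, implicit-def $vcc
  //   %carry = COPY $vcc      ; only emitted if the carry has non-debug uses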
  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (OpToFold->isImm()) {
      unsigned ImmOpc = 0;
      if (Opc == AMDGPU::S_SETREG_B32)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
      else if (Opc == AMDGPU::S_SETREG_B32_mode)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
      if (ImmOpc) {
        MI->setDesc(TII->get(ImmOpc));
        appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
        return true;
      }
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
           Opc == AMDGPU::V_SUB_CO_U32_e64 ||
           Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo)) {
            return false;
          }
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  if (UseMO.isUndef() || TII->isSDWA(MI))
    return false;

  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // Do not fold into an indirect mov.
    return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
  }

  return true;
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence, and find the
// initializer for each subreg, tracking each back to a foldable inline
// immediate if possible. Returns true on success.
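//
// For instance (schematic, virtual register numbers are arbitrary):
//   %3:sgpr_32 = S_MOV_B32 0
//   %4:sreg_64 = REG_SEQUENCE %3, %subreg.sub0, %3, %subreg.sub1
// would yield two (operand, subreg) pairs whose operands are both the
// immediate 0 from the S_MOV_B32.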
static bool getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy,
    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
  MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && !Sub->getSubReg() &&
         TII->isFoldableCopy(*SubDef);
         SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg())
        break;
      Sub = Op;
    }

    Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
  }

  return true;
}

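// Try to fold an immediate-like value into an operand that accepts AGPR/VGPR
// inline constants (e.g. an MFMA source). The value may come directly from an
// immediate operand, or from a REG_SEQUENCE whose subregisters are all
// initialized with the same ("splat") inline constant, in which case that
// constant is folded in place of the register use. This is only a sketch of
// the idea; see the body below for the exact legality checks.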
static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!UseReg.isVirtual())
    return false;

  if (llvm::any_of(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI;
      }))
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_nodbg_iterator Next;
    for (MachineRegisterInfo::use_nodbg_iterator
           RSUse = MRI->use_nodbg_begin(RegSeqDstReg), RSE = MRI->use_nodbg_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->isMUBUF(*UseMI)) {
      if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
          MFI->getScratchRSrcReg())
        return;

      // Ensure this is either relative to the current frame or the current
      // wave.
      MachineOperand &SOff =
          *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
      if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
          (!SOff.isImm() || SOff.getImm() != 0))
        return;

      // If this is relative to the current wave, update it to be relative to
      // the current frame.
      if (SOff.isImm())
        SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
    }

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    if (TII->isFLATScratch(*UseMI) &&
        AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
                                   AMDGPU::OpName::vaddr) != -1) {
      unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
      UseMI->setDesc(TII->get(NewOpc));
    }

    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    Register SrcReg = UseMI->getOperand(1).getReg();
    assert(SrcReg.isVirtual());

    const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);

    // Don't fold into a copy to a physical register with the same class. Doing
    // so would interfere with the register coalescer's logic which would avoid
    // redundant initializations.
    if (DestReg.isPhysical() && SrcRC->contains(DestReg))
      return;

    const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
    if (!DestReg.isPhysical()) {
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_nodbg_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_nodbg_iterator Use = MRI->use_nodbg_begin(DestReg),
               E = MRI->use_nodbg_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          // There's no point trying to fold into an implicit operand.
          if (Use->isImplicit())
            continue;

          FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
        }
      }

      if (DestRC == &AMDGPU::AGPR_32RegClass &&
          TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
        UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        CopiesToReplace.push_back(UseMI);
        return;
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR. v_accvgpr_write_b32
      // can only accept VGPR or inline immediate. Recreate a reg_sequence with
      // its initializers right here, so we will rematerialize immediates and
      // avoid copies via different reg classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better do it here before copyPhysReg() creates
              // several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creating
            // exploded SGPR->VGPR->AGPR copies in copyPhysReg() later, create
            // a copy here and track if we already have such a copy.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }

      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities.  The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::S_XNOR_B32:
    Result = ~(LHS ^ RHS);
    return true;
  case AMDGPU::S_NAND_B32:
    Result = ~(LHS & RHS);
    return true;
  case AMDGPU::S_NOR_B32:
    Result = ~(LHS | RHS);
    return true;
  case AMDGPU::S_ANDN2_B32:
    Result = LHS & ~RHS;
    return true;
  case AMDGPU::S_ORN2_B32:
    Result = LHS | ~RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
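//
// The sources are first looked through to materialized immediates, so e.g.
// (schematic, virtual register numbers are made up):
//   %1:vgpr_32 = V_MOV_B32_e32 0
//   %2:vgpr_32 = V_OR_B32_e32 %1, %0
// can be rewritten as a COPY of %0 even though %1 is a register operand.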
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand; src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
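//
// For example, a select whose operands are identical is equivalent to a copy
// (schematic, source modifier operands shown as 0):
//   %2 = V_CNDMASK_B32_e64 0, %1, 0, %1, %cc
//   -->
//   %2 = COPY %1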
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_nodbg_iterator NextUse;
    for (MachineRegisterInfo::use_nodbg_iterator
           Use = MRI->use_nodbg_begin(Dst.getReg()), E = MRI->use_nodbg_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_nodbg_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector <MachineRegisterInfo::use_nodbg_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_nodbg_iterator
           Use = MRI->use_nodbg_begin(Dst.getReg()), E = MRI->use_nodbg_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
        FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  SmallPtrSet<MachineInstr *, 16> Folded;
  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Folded.count(Fold.UseMI))
      continue;
    if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      if (tryFoldInst(TII, Fold.UseMI))
        Folded.insert(Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
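//
// For example (schematic VOP3 operand lists, modifiers shown as 0):
//   %1 = V_ADD_F32_e64 0, %a, 0, %b, 0, 0
//   %2 = V_MAX_F32_e64 0, %1, 0, %1, 1 /*clamp*/, 0
// can be folded to
//   %1 = V_ADD_F32_e64 0, %a, 0, %b, 1 /*clamp*/, 0
// with uses of %2 rewritten to use %1.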
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
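//
// For example (schematic VOP3 operand lists, modifiers shown as 0):
//   %1 = V_ADD_F32_e64 0, %a, 0, %b, 0, 0
//   %2 = V_MUL_F32_e64 0, 2.0, 0, %1, 0, 0
// can be folded to
//   %1 = V_ADD_F32_e64 0, %a, 0, %b, 0, 1 /*omod: *2*/
// with uses of %2 rewritten to use %1, provided clamp/omod are not already
// set and the FP mode checks below allow it.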
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so we
      // can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the source of the COPY below must not be replaced with the immediate 1:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Dst.getReg().isVirtual())
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}