//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned Opc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

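// Apply the fold described by Fold to its use instruction: rewrite the use
// operand to an immediate (handling packed op_sel/op_sel_hi), a frame index,
// or a substituted virtual register. Returns false if the operand could not
// be updated (e.g. physical register sources).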
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // If upper part is all zero we do not need op_sel_hi.
      if (!isUInt<16>(Fold.ImmToFold)) {
        if (!(Fold.ImmToFold & 0xffff)) {
          Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
          return true;
        }
        Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
      }
    }
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

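// Return true if MI is already recorded as the use instruction of a queued
// fold candidate.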
static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

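// Try to record folding OpToFold into operand OpNo of MI. If the operand is
// not legal as-is, this may rewrite MI to an equivalent opcode (mac -> mad/fma,
// s_setreg_b32 -> s_setreg_imm32_b32) or commute it to make the fold legal.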
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

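// Fold OpToFold into the use at UseOpIdx of UseMI, appending successful
// candidates to FoldList. REG_SEQUENCE users are handled by recursing into
// their uses, COPYs of immediates are rewritten to movs, and 64-bit
// immediates used through sub0/sub1 are split into 32-bit halves.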
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only allow a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    //  into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities.  The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

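// Constant fold a 32-bit bitwise or shift operation. Returns true and sets
// Result if Opcode is one of the handled opcodes.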
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

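// Change MI to the new opcode and drop any implicit operands the new opcode
// no longer needs.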
static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

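// If Op is a virtual register defined by a move-immediate, return the
// immediate source operand of that def; otherwise return Op unchanged.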
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

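// Fold OpToFold, the source of a foldable copy/mov MI, into all uses of MI's
// destination register, then apply the collected fold candidates.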
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user.
        // The same constant folded instruction could also have a second use
        // operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 : 0;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

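// Map a multiplier constant (0.5, 2.0 or 4.0) to the corresponding output
// modifier encoding for the given multiply opcode.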
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}