//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking : public MachineFunctionPass {
public:
  static char ID;

  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMasking::ID = 0;

char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;

/// If \p MI is a copy from exec, return the register copied to.
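/// For example (illustrative MIR, wave64; the destination register is
/// arbitrary):
///   $sgpr0_sgpr1 = S_MOV_B64 $exec   ; returns $sgpr0_sgpr1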
static Register isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B32_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() &&
        Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
static Register isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B32: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() &&
        Dst.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC) &&
        MI.getOperand(1).isReg())
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32_term:
    llvm_unreachable("should have been replaced");
  }

  return Register();
}

/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
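/// For example (illustrative MIR, wave64): given
///   $sgpr2_sgpr3 = S_AND_B64 $exec, $vcc
/// this returns $sgpr2_sgpr3.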
static Register isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    break;
  }
  case AMDGPU::S_AND_B32:
  case AMDGPU::S_OR_B32:
  case AMDGPU::S_XOR_B32:
  case AMDGPU::S_ANDN2_B32:
  case AMDGPU::S_ORN2_B32:
  case AMDGPU::S_NAND_B32:
  case AMDGPU::S_NOR_B32:
  case AMDGPU::S_XNOR_B32: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    break;
  }
  }

  return AMDGPU::NoRegister;
}

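// Map a logical opcode to its *_SAVEEXEC_* form, e.g. S_AND_B64 to
// S_AND_SAVEEXEC_B64, or INSTRUCTION_LIST_END if there is none.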
static unsigned getSaveExecOp(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::S_AND_B64:
    return AMDGPU::S_AND_SAVEEXEC_B64;
  case AMDGPU::S_OR_B64:
    return AMDGPU::S_OR_SAVEEXEC_B64;
  case AMDGPU::S_XOR_B64:
    return AMDGPU::S_XOR_SAVEEXEC_B64;
  case AMDGPU::S_ANDN2_B64:
    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
  case AMDGPU::S_ORN2_B64:
    return AMDGPU::S_ORN2_SAVEEXEC_B64;
  case AMDGPU::S_NAND_B64:
    return AMDGPU::S_NAND_SAVEEXEC_B64;
  case AMDGPU::S_NOR_B64:
    return AMDGPU::S_NOR_SAVEEXEC_B64;
  case AMDGPU::S_XNOR_B64:
    return AMDGPU::S_XNOR_SAVEEXEC_B64;
  case AMDGPU::S_AND_B32:
    return AMDGPU::S_AND_SAVEEXEC_B32;
  case AMDGPU::S_OR_B32:
    return AMDGPU::S_OR_SAVEEXEC_B32;
  case AMDGPU::S_XOR_B32:
    return AMDGPU::S_XOR_SAVEEXEC_B32;
  case AMDGPU::S_ANDN2_B32:
    return AMDGPU::S_ANDN2_SAVEEXEC_B32;
  case AMDGPU::S_ORN2_B32:
    return AMDGPU::S_ORN2_SAVEEXEC_B32;
  case AMDGPU::S_NAND_B32:
    return AMDGPU::S_NAND_SAVEEXEC_B32;
  case AMDGPU::S_NOR_B32:
    return AMDGPU::S_NOR_SAVEEXEC_B32;
  case AMDGPU::S_XNOR_B32:
    return AMDGPU::S_XNOR_SAVEEXEC_B32;
  default:
    return AMDGPU::INSTRUCTION_LIST_END;
  }
}

// These are only terminators to get correct spill code placement during
// register allocation, so turn them back into normal instructions.
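// For example, S_MOV_B64_term with a register source becomes a plain COPY,
// and S_AND_B64_term becomes S_AND_B64.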
static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_MOV_B32_term: {
    bool RegSrc = MI.getOperand(1).isReg();
    MI.setDesc(TII.get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B32));
    return true;
  }
  case AMDGPU::S_MOV_B64_term: {
    bool RegSrc = MI.getOperand(1).isReg();
    MI.setDesc(TII.get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B64));
    return true;
  }
  case AMDGPU::S_XOR_B64_term:
    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
    return true;
  case AMDGPU::S_XOR_B32_term:
    MI.setDesc(TII.get(AMDGPU::S_XOR_B32));
    return true;
  case AMDGPU::S_OR_B64_term:
    MI.setDesc(TII.get(AMDGPU::S_OR_B64));
    return true;
  case AMDGPU::S_OR_B32_term:
    MI.setDesc(TII.get(AMDGPU::S_OR_B32));
    return true;
  case AMDGPU::S_ANDN2_B64_term:
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
    return true;
  case AMDGPU::S_ANDN2_B32_term:
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B32));
    return true;
  case AMDGPU::S_AND_B64_term:
    MI.setDesc(TII.get(AMDGPU::S_AND_B64));
    return true;
  case AMDGPU::S_AND_B32_term:
    MI.setDesc(TII.get(AMDGPU::S_AND_B32));
    return true;
  default:
    return false;
  }
}

// Turn all pseudoterminators in the block into their equivalent non-terminator
// instructions. Returns the reverse iterator to the first non-terminator
// instruction in the block.
static MachineBasicBlock::reverse_iterator
fixTerminators(const SIInstrInfo &TII, MachineBasicBlock &MBB) {
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();

  bool Seen = false;
  MachineBasicBlock::reverse_iterator FirstNonTerm = I;
  for (; I != E; ++I) {
    if (!I->isTerminator())
      return Seen ? FirstNonTerm : I;

    if (removeTerminatorBit(TII, *I)) {
      if (!Seen) {
        FirstNonTerm = I;
        Seen = true;
      }
    }
  }

  return FirstNonTerm;
}

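// Scan backwards from I for the nearest copy from exec, giving up after a
// fixed instruction limit; returns MBB.rend() if none is found.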
static MachineBasicBlock::reverse_iterator
findExecCopy(const SIInstrInfo &TII, const GCNSubtarget &ST,
             MachineBasicBlock &MBB, MachineBasicBlock::reverse_iterator I) {
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    Register CopyFromExec = isCopyFromExec(*I, ST);
    if (CopyFromExec.isValid())
      return I;
  }

  return E;
}

// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
  for (MachineBasicBlock *Succ : MBB.successors()) {
    if (Succ->isLiveIn(Reg))
      return true;
  }

  return false;
}

// Backwards-iterate from Origin (for n=MaxInstructions iterations) until
// either the beginning of the BB is reached or Pred evaluates to true, which
// can be an arbitrary condition based on the current MachineInstr, for
// instance matching a specific target instruction. Breaks prematurely by
// returning nullptr if one of the registers given in NonModifiableRegs is
// modified by the current instruction.
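// For example, findPossibleVCMPVCMPXOptimization below uses this to locate
// the v_cmp that defines a saveexec input while ensuring exec is not
// clobbered in between.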
static MachineInstr *
findInstrBackwards(MachineInstr &Origin,
                   std::function<bool(MachineInstr *)> Pred,
                   ArrayRef<MCRegister> NonModifiableRegs,
                   const SIRegisterInfo *TRI, unsigned MaxInstructions = 5) {
  MachineBasicBlock::reverse_iterator A = Origin.getReverseIterator(),
                                      E = Origin.getParent()->rend();
  unsigned CurrentIteration = 0;

  for (++A; CurrentIteration < MaxInstructions && A != E; ++A) {
    if (Pred(&*A))
      return &*A;

    for (MCRegister Reg : NonModifiableRegs) {
      if (A->modifiesRegister(Reg, TRI))
        return nullptr;
    }

    ++CurrentIteration;
  }

  return nullptr;
}

// Determine if a register Reg is not re-defined and still in use
// in the range (Stop..BB.end].
// It does so by calculating liveness backwards from the end of the BB until
// either Stop or the beginning of the BB is reached.
// After liveness is calculated, we can determine if Reg is still in use and
// not defined in between the instructions.
static bool isRegisterInUseAfter(MachineInstr &Stop, MCRegister Reg,
                                 const SIRegisterInfo *TRI,
                                 MachineRegisterInfo &MRI) {
  LivePhysRegs LR(*TRI);
  LR.addLiveOuts(*Stop.getParent());

  for (auto A = Stop.getParent()->rbegin();
       A != Stop.getParent()->rend() && A != Stop.getReverseIterator(); ++A) {
    LR.stepBackward(*A);
  }

  return !LR.available(MRI, Reg);
}

// Tries to find an opportunity to optimize a v_cmp ..., s_and_saveexec
// sequence by looking at an instance of an s_and_saveexec instruction.
// Returns a pointer to the v_cmp instruction if it is safe to replace the
// sequence (see the conditions in the function body). This runs after
// register allocation, so some checks on operand dependencies need to be
// considered.
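// An illustrative candidate (wave32; the registers are hypothetical):
//   $sgpr0 = V_CMP_LT_U32_e64 $vgpr0, $vgpr1
//   $sgpr1 = S_AND_SAVEEXEC_B32 $sgpr0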
static MachineInstr *findPossibleVCMPVCMPXOptimization(
    MachineInstr &SaveExec, MCRegister Exec, const SIRegisterInfo *TRI,
    const SIInstrInfo *TII, MachineRegisterInfo &MRI) {
  MachineInstr *VCmp = nullptr;

  Register SaveExecDest = SaveExec.getOperand(0).getReg();
  if (!TRI->isSGPRReg(MRI, SaveExecDest))
    return nullptr;

  MachineOperand *SaveExecSrc0 =
      TII->getNamedOperand(SaveExec, AMDGPU::OpName::src0);
  if (!SaveExecSrc0->isReg())
    return nullptr;

  // Try to find the last v_cmp instruction that defines the saveexec input
  // operand without any write to Exec in between.
  VCmp = findInstrBackwards(
      SaveExec,
      [&](MachineInstr *Check) {
        return AMDGPU::getVCMPXOpFromVCMP(Check->getOpcode()) != -1 &&
               Check->modifiesRegister(SaveExecSrc0->getReg(), TRI);
      },
      {Exec, SaveExecSrc0->getReg()}, TRI);

  if (!VCmp)
    return nullptr;

  MachineOperand *VCmpDest = TII->getNamedOperand(*VCmp, AMDGPU::OpName::sdst);
  assert(VCmpDest && "Should have an sdst operand!");

  // Check if any of the v_cmp source operands is written by the saveexec.
  MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);
  if (Src0->isReg() && TRI->isSGPRReg(MRI, Src0->getReg()) &&
      SaveExec.modifiesRegister(Src0->getReg(), TRI))
    return nullptr;

  MachineOperand *Src1 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src1);
  if (Src1->isReg() && TRI->isSGPRReg(MRI, Src1->getReg()) &&
      SaveExec.modifiesRegister(Src1->getReg(), TRI))
    return nullptr;

  // Don't do the transformation if the destination operand is included in
  // its MBB's live-outs, meaning it's used in any of its successors, leading
  // to incorrect code if the v_cmp and therefore the def of
  // the dest operand is removed.
  if (isLiveOut(*VCmp->getParent(), VCmpDest->getReg()))
    return nullptr;

  // If the v_cmp target is in use after the s_and_saveexec, skip the
  // optimization.
  if (isRegisterInUseAfter(SaveExec, VCmpDest->getReg(), TRI, MRI))
    return nullptr;

  // Try to determine if there is a write to any of the VCmp
  // operands between the saveexec and the vcmp.
  // If yes, additional VGPR spill code might need to be inserted. In this
  // case, it's not worth replacing the instruction sequence.
  SmallVector<MCRegister, 2> NonDefRegs;
  if (Src0->isReg())
    NonDefRegs.push_back(Src0->getReg());

  if (Src1->isReg())
    NonDefRegs.push_back(Src1->getReg());

  if (!findInstrBackwards(
          SaveExec, [&](MachineInstr *Check) { return Check == VCmp; },
          NonDefRegs, TRI))
    return nullptr;

  return VCmp;
}

// Inserts the optimized s_mov_b32 / v_cmpx sequence based on the
// operands extracted from a v_cmp ..., s_and_saveexec pattern.
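// The emitted result looks roughly like (wave32; registers are hypothetical):
//   $sgpr1 = S_MOV_B32 $exec_lo      ; only if the saveexec result is used
//   V_CMPX_LT_U32_e64 $vgpr0, $vgpr1 ; implicitly defines $exec_lo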
static bool optimizeVCMPSaveExecSequence(MachineInstr &SaveExecInstr,
                                         MachineInstr &VCmp, MCRegister Exec,
                                         const SIInstrInfo *TII,
                                         const SIRegisterInfo *TRI,
                                         MachineRegisterInfo &MRI) {
  const int NewOpcode = AMDGPU::getVCMPXOpFromVCMP(VCmp.getOpcode());

  if (NewOpcode == -1)
    return false;

  MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);
  MachineOperand *Src1 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src1);

  Register MoveDest = SaveExecInstr.getOperand(0).getReg();

  MachineBasicBlock::instr_iterator InsertPosIt = SaveExecInstr.getIterator();
  if (!SaveExecInstr.uses().empty()) {
    bool isSGPR32 = TRI->getRegSizeInBits(MoveDest, MRI) == 32;
    unsigned MovOpcode = isSGPR32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(*SaveExecInstr.getParent(), InsertPosIt,
            SaveExecInstr.getDebugLoc(), TII->get(MovOpcode), MoveDest)
        .addReg(Exec);
  }

  // Omit dst as V_CMPX is implicitly writing to EXEC.
  // Add dummy src and clamp modifiers, if needed.
  auto Builder = BuildMI(*VCmp.getParent(), std::next(InsertPosIt),
                         VCmp.getDebugLoc(), TII->get(NewOpcode));

  if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src0_modifiers) !=
      -1)
    Builder.addImm(0);

  Builder.add(*Src0);

  if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src1_modifiers) !=
      -1)
    Builder.addImm(0);

  Builder.add(*Src1);

  if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::clamp) != -1)
    Builder.addImm(0);

  return true;
}

bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  // Optimize sequences emitted for control flow lowering. They are originally
  // emitted as separate operations because spill code may need to be
  // inserted for the saved copy of exec.
  //
  //     x = copy exec
  //     z = s_<op>_b64 x, y
  //     exec = copy z
  // =>
  //     x = s_<op>_saveexec_b64 y
  //

  bool Changed = false;
  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    // It's possible to see other terminator copies after the exec copy. This
    // can happen if control flow pseudos had their outputs used by phis.
    Register CopyToExec;

    unsigned SearchCount = 0;
    const unsigned SearchLimit = 5;
    while (I != E && SearchCount++ < SearchLimit) {
      CopyToExec = isCopyToExec(*I, ST);
      if (CopyToExec)
        break;
      ++I;
    }

    if (!CopyToExec)
      continue;

    // Scan backwards to find the def.
    auto CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(*TII, ST, MBB, I);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(Exec);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
        Changed = true;
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

    for (MachineBasicBlock::iterator
             J = std::next(CopyFromExecInst->getIterator()),
             JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(Exec, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        } else {
          LLVM_DEBUG(dbgs()
                     << "Instruction does not read exec copy: " << *J << '\n');
          break;
        }
      } else if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      // A non-commutable operation cannot be rewritten; move on to the next
      // block rather than aborting the scan of the whole function.
      if (!SaveExecInst->isCommutable())
        continue;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
        .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, Exec, AMDGPU::NoSubRegister,
                                    *TRI);
    }

    Changed = true;
  }

  // After all s_<op>_saveexec instructions are inserted,
  // replace (on GFX10.3 and later)
  // v_cmp_* SGPR, IMM, VGPR
  // s_and_saveexec_b32 EXEC_SGPR_DEST, SGPR
  // with
  // s_mov_b32 EXEC_SGPR_DEST, exec_lo
  // v_cmpx_* IMM, VGPR
  // to reduce pipeline stalls.
  if (ST.hasGFX10_3Insts()) {
    DenseMap<MachineInstr *, MachineInstr *> SaveExecVCmpMapping;
    const unsigned AndSaveExecOpcode =
        ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : MBB) {
        // Record relevant v_cmp / s_and_saveexec instruction pairs for
        // replacement.
        if (MI.getOpcode() != AndSaveExecOpcode)
          continue;

        if (MachineInstr *VCmp =
                findPossibleVCMPVCMPXOptimization(MI, Exec, TRI, TII, *MRI))
          SaveExecVCmpMapping[&MI] = VCmp;
      }
    }

    for (const auto &Entry : SaveExecVCmpMapping) {
      MachineInstr *SaveExecInstr = Entry.getFirst();
      MachineInstr *VCmpInstr = Entry.getSecond();

      if (optimizeVCMPSaveExecSequence(*SaveExecInstr, *VCmpInstr, Exec, TII,
                                       TRI, *MRI)) {
        SaveExecInstr->eraseFromParent();
        VCmpInstr->eraseFromParent();

        Changed = true;
      }
    }
  }

  return Changed;
}