1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Analysis/MemoryLocation.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/MC/MCInstBuilder.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/TargetRegistry.h"
30 
31 using namespace llvm;
32 
33 #define GEN_CHECK_COMPRESS_INSTR
34 #include "RISCVGenCompressInstEmitter.inc"
35 
36 #define GET_INSTRINFO_CTOR_DTOR
37 #include "RISCVGenInstrInfo.inc"
38 
39 namespace llvm {
40 namespace RISCVVPseudosTable {
41 
42 using namespace RISCV;
43 
44 #define GET_RISCVVPseudosTable_IMPL
45 #include "RISCVGenSearchableTables.inc"
46 
47 } // namespace RISCVVPseudosTable
48 } // namespace llvm
49 
// Construct the RISCV instruction info. The two opcodes passed to the
// generated base class identify the call-frame setup/teardown pseudos so
// target-independent code can recognize call sequences.
RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}
53 
54 MCInst RISCVInstrInfo::getNop() const {
55   if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
56     return MCInstBuilder(RISCV::C_NOP);
57   return MCInstBuilder(RISCV::ADDI)
58       .addReg(RISCV::X0)
59       .addReg(RISCV::X0)
60       .addImm(0);
61 }
62 
63 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
64                                              int &FrameIndex) const {
65   switch (MI.getOpcode()) {
66   default:
67     return 0;
68   case RISCV::LB:
69   case RISCV::LBU:
70   case RISCV::LH:
71   case RISCV::LHU:
72   case RISCV::FLH:
73   case RISCV::LW:
74   case RISCV::FLW:
75   case RISCV::LWU:
76   case RISCV::LD:
77   case RISCV::FLD:
78     break;
79   }
80 
81   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
82       MI.getOperand(2).getImm() == 0) {
83     FrameIndex = MI.getOperand(1).getIndex();
84     return MI.getOperand(0).getReg();
85   }
86 
87   return 0;
88 }
89 
90 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
91                                             int &FrameIndex) const {
92   switch (MI.getOpcode()) {
93   default:
94     return 0;
95   case RISCV::SB:
96   case RISCV::SH:
97   case RISCV::SW:
98   case RISCV::FSH:
99   case RISCV::FSW:
100   case RISCV::SD:
101   case RISCV::FSD:
102     break;
103   }
104 
105   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
106       MI.getOperand(2).getImm() == 0) {
107     FrameIndex = MI.getOperand(1).getIndex();
108     return MI.getOperand(0).getReg();
109   }
110 
111   return 0;
112 }
113 
// Return true when copying a NumRegs-register tuple from SrcReg to DstReg in
// ascending order would overwrite source registers that have not been read
// yet, i.e. when DstReg lands inside the source tuple.
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We want the positive remainder of (DstReg - SrcReg) mod 32; masking with
  // 0x1f yields exactly that, even when the unsigned subtraction wraps.
  unsigned Distance = (DstReg - SrcReg) & 0x1f;
  return Distance < NumRegs;
}
120 
// Emit the instruction(s) that copy SrcReg into DstReg. Handles GPRs, FPRs
// (half/single/double), whole vector register groups (LMUL 1/2/4/8), and
// Zvlsseg register tuples (NF segments, each an LMUL-sized register group).
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  // GPR->GPR: the canonical integer move is `addi rd, rs, 0`.
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies and VR->VR copies.
  // For vector classes, pick the whole-register-move pseudo that matches the
  // group size. Tuple classes (VRNxMy) additionally record the segment count
  // (NF), the per-segment group size (LMul) and the subregister index of the
  // first segment, so the copy can be emitted segment by segment below.
  unsigned Opc;
  bool IsScalableVector = true;
  unsigned NF = 1;   // Number of segment registers in a Zvlsseg tuple.
  unsigned LMul = 1; // Register-group size of each segment.
  unsigned SubRegIdx = RISCV::sub_vrm1_0;
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_H;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_S;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_D;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV2R_V;
  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV4R_V;
  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV8R_V;
  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 2;
    LMul = 1;
  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 2;
    LMul = 2;
  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    NF = 2;
    LMul = 4;
  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 3;
    LMul = 1;
  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 3;
    LMul = 2;
  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 4;
    LMul = 1;
  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 4;
    LMul = 2;
  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 5;
    LMul = 1;
  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 6;
    LMul = 1;
  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 7;
    LMul = 1;
  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 8;
    LMul = 1;
  } else {
    llvm_unreachable("Impossible reg-to-reg copy");
  }

  if (IsScalableVector) {
    if (NF == 1) {
      // Single whole-register move pseudo.
      BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Tuple copy: move one segment at a time. If the destination overlaps
      // the source tuple, a forward copy would clobber segments that have not
      // been read yet, so copy the segments in reverse order instead.
      const TargetRegisterInfo *TRI = STI.getRegisterInfo();

      int I = 0, End = NF, Incr = 1;
      unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
      unsigned DstEncoding = TRI->getEncodingValue(DstReg);
      if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) {
        I = NF - 1;
        End = -1;
        Incr = -1;
      }

      for (; I != End; I += Incr) {
        BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I))
            .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                    getKillRegState(KillSrc));
      }
    }
  } else {
    // FPR->FPR: the canonical floating-point move is `fsgnj rd, rs, rs`.
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SrcReg, getKillRegState(KillSrc));
  }
}
242 
// Emit instruction(s) that store SrcReg to stack slot FI. Scalar classes use
// a plain store with immediate offset 0; vector classes use spill pseudos and
// mark the slot as a scalable stack object, since its byte size depends on
// the run-time vector length.
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  // Select the store opcode (or spill pseudo) matching the register class.
  unsigned Opcode;
  bool IsScalableVector = true;
  bool IsZvlsseg = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    // XLEN-sized integer store: SW on RV32, SD on RV64.
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVSPILL_M1;
    IsZvlsseg = false;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVSPILL_M2;
    IsZvlsseg = false;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVSPILL_M4;
    IsZvlsseg = false;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVSPILL_M8;
    IsZvlsseg = false;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
    // The compile-time size of a scalable spill slot is unknown, so the
    // memory operand uses UnknownSize.
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    auto MIB = BuildMI(MBB, I, DL, get(Opcode))
                   .addReg(SrcReg, getKillRegState(IsKill))
                   .addFrameIndex(FI)
                   .addMemOperand(MMO);
    if (IsZvlsseg) {
      // For spilling/reloading Zvlsseg registers, append the dummy field for
      // the scaled vector length. The argument will be used when expanding
      // these pseudo instructions.
      MIB.addReg(RISCV::X0);
    }
  } else {
    // Scalar store: frame-index base plus an explicit zero offset.
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}
336 
// Emit instruction(s) that load DstReg from stack slot FI. Mirrors
// storeRegToStackSlot: scalar classes use a plain load with immediate offset
// 0; vector classes use reload pseudos on a scalable stack object.
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  // Select the load opcode (or reload pseudo) matching the register class.
  unsigned Opcode;
  bool IsScalableVector = true;
  bool IsZvlsseg = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    // XLEN-sized integer load: LW on RV32, LD on RV64.
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVRELOAD_M1;
    IsZvlsseg = false;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVRELOAD_M2;
    IsZvlsseg = false;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVRELOAD_M4;
    IsZvlsseg = false;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoVRELOAD_M8;
    IsZvlsseg = false;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    // The compile-time size of a scalable reload slot is unknown, so the
    // memory operand uses UnknownSize.
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
                   .addFrameIndex(FI)
                   .addMemOperand(MMO);
    if (IsZvlsseg) {
      // For spilling/reloading Zvlsseg registers, append the dummy field for
      // the scaled vector length. The argument will be used when expanding
      // these pseudo instructions.
      MIB.addReg(RISCV::X0);
    }
  } else {
    // Scalar load: frame-index base plus an explicit zero offset.
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}
428 
// Materialize the constant Val into DstReg, emitting the (possibly
// multi-instruction) sequence computed by RISCVMatInt. Intermediate results
// flow through a fresh virtual register; only the last instruction of the
// sequence writes DstReg.
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0;

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, IsRV64);
  assert(Seq.size() > 0);

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      // LUI takes only an immediate operand; every other opcode in the
      // sequence is of the form `op rd, rs, imm`.
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}
466 
// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  // Conditional branches are `op rs1, rs2, target`: operand 2 is the
  // destination block, operands 0 and 1 are the compared registers.
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}
480 
481 static unsigned getOppositeBranchOpcode(int Opc) {
482   switch (Opc) {
483   default:
484     llvm_unreachable("Unrecognized conditional branch");
485   case RISCV::BEQ:
486     return RISCV::BNE;
487   case RISCV::BNE:
488     return RISCV::BEQ;
489   case RISCV::BLT:
490     return RISCV::BGE;
491   case RISCV::BGE:
492     return RISCV::BLT;
493   case RISCV::BLTU:
494     return RISCV::BGEU;
495   case RISCV::BGEU:
496     return RISCV::BLTU;
497   }
498 }
499 
// Analyze the terminators of MBB. On success (return false) fill TBB/FBB and
// Cond per the TargetInstrInfo contract; return true when the block's
// branching cannot be understood (indirect branch, >2 terminators, ...).
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    // Everything past the first unconditional/indirect branch is dead code.
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
567 
// Remove up to two branches from the end of MBB: the trailing unconditional
// or conditional branch, and a conditional branch immediately before a
// removed unconditional one. Returns the number of instructions removed; if
// BytesRemoved is non-null it accumulates their total size in bytes.
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  // Re-inspect the instruction that is now last: a conditional branch may
  // have preceded the branch just erased.
  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}
599 
600 // Inserts a branch into the end of the specific MachineBasicBlock, returning
601 // the number of instructions inserted.
602 unsigned RISCVInstrInfo::insertBranch(
603     MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
604     ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
605   if (BytesAdded)
606     *BytesAdded = 0;
607 
608   // Shouldn't be a fall through.
609   assert(TBB && "insertBranch must not be told to insert a fallthrough");
610   assert((Cond.size() == 3 || Cond.size() == 0) &&
611          "RISCV branch conditions have two components!");
612 
613   // Unconditional branch.
614   if (Cond.empty()) {
615     MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
616     if (BytesAdded)
617       *BytesAdded += getInstSizeInBytes(MI);
618     return 1;
619   }
620 
621   // Either a one or two-way conditional branch.
622   unsigned Opc = Cond[0].getImm();
623   MachineInstr &CondMI =
624       *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
625   if (BytesAdded)
626     *BytesAdded += getInstSizeInBytes(CondMI);
627 
628   // One-way conditional branch.
629   if (!FBB)
630     return 1;
631 
632   // Two-way conditional branch.
633   MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
634   if (BytesAdded)
635     *BytesAdded += getInstSizeInBytes(MI);
636   return 2;
637 }
638 
// Insert an unconditional branch to DestBB that can reach any 32-bit signed
// offset, for targets out of range of a plain JAL. Expands to PseudoJump
// (auipc+jalr, 8 bytes) using a scavenged scratch register. Returns the size
// in bytes of the inserted sequence.
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  // Replace the placeholder virtual register with a scavenged physical one
  // and drop all virtual registers again (we run post-RA).
  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                MI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}
674 
// Invert the condition in Cond (as filled by parseCondBranch) by swapping the
// stored branch opcode for its logical opposite. Returning false signals the
// condition was successfully reversed.
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}
681 
682 MachineBasicBlock *
683 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
684   assert(MI.getDesc().isBranch() && "Unexpected opcode!");
685   // The branch target is always the last operand.
686   int NumOp = MI.getNumExplicitOperands();
687   return MI.getOperand(NumOp - 1).getMBB();
688 }
689 
// Return true if a branch of kind BranchOp can reach a target BrOffset bytes
// away without relaxation.
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    // B-type encoding: 13-bit signed, 2-byte-aligned offset.
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    // J-type encoding: 21-bit signed, 2-byte-aligned offset.
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // Expands to auipc+jalr; the +0x800 accounts for the rounding used when
    // splitting the offset into hi20/lo12 parts.
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  }
}
713 
// Return the number of bytes MI will occupy in the final encoding, including
// the expanded size of pseudo instructions and compressible instructions.
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: {
    // An instruction the compressed-instruction emitter can shrink will be
    // emitted as a 2-byte C-extension form; otherwise use the tablegen size.
    if (MI.getParent() && MI.getParent()->getParent()) {
      const auto MF = MI.getMF();
      const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
      const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
      const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
      const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
      if (isCompressibleInst(MI, &ST, MRI, STI))
        return 2;
    }
    return get(Opcode).getSize();
  }
  // Meta instructions emit no machine code.
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  // These values are determined based on RISCVExpandAtomicPseudoInsts,
  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
  // pseudos are expanded.
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoJump:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
    return 8;
  case RISCV::PseudoAtomicLoadNand32:
  case RISCV::PseudoAtomicLoadNand64:
    return 20;
  case RISCV::PseudoMaskedAtomicSwap32:
  case RISCV::PseudoMaskedAtomicLoadAdd32:
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return 28;
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return 32;
  case RISCV::PseudoMaskedAtomicLoadMax32:
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return 44;
  case RISCV::PseudoMaskedAtomicLoadUMax32:
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return 36;
  case RISCV::PseudoCmpXchg32:
  case RISCV::PseudoCmpXchg64:
    return 16;
  case RISCV::PseudoMaskedCmpXchg32:
    return 32;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    // Inline asm size is estimated from the assembly text itself.
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1: {
    // The values are determined based on expandVSPILL and expandVRELOAD that
    // expand the pseudos depending on NF.
    unsigned NF = isRVVSpillForZvlsseg(Opcode)->first;
    return 4 * (2 * NF - 1);
  }
  }
}
803 
804 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
805   const unsigned Opcode = MI.getOpcode();
806   switch (Opcode) {
807   default:
808     break;
809   case RISCV::FSGNJ_D:
810   case RISCV::FSGNJ_S:
811     // The canonical floating-point move is fsgnj rd, rs, rs.
812     return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
813            MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
814   case RISCV::ADDI:
815   case RISCV::ORI:
816   case RISCV::XORI:
817     return (MI.getOperand(1).isReg() &&
818             MI.getOperand(1).getReg() == RISCV::X0) ||
819            (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
820   }
821   return MI.isAsCheapAsAMove();
822 }
823 
// If MI is a register move in disguise (`addi rd, rs, 0` or
// `fsgnj rd, rs, rs`), return its destination and source operands; otherwise
// return None.
Optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    // Operand 1 can be a frameindex but callers expect registers
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return None;
}
847 
848 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
849                                        StringRef &ErrInfo) const {
850   const MCInstrInfo *MCII = STI.getInstrInfo();
851   MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
852 
853   for (auto &OI : enumerate(Desc.operands())) {
854     unsigned OpType = OI.value().OperandType;
855     if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
856         OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
857       const MachineOperand &MO = MI.getOperand(OI.index());
858       if (MO.isImm()) {
859         int64_t Imm = MO.getImm();
860         bool Ok;
861         switch (OpType) {
862         default:
863           llvm_unreachable("Unexpected operand type");
864         case RISCVOp::OPERAND_UIMM4:
865           Ok = isUInt<4>(Imm);
866           break;
867         case RISCVOp::OPERAND_UIMM5:
868           Ok = isUInt<5>(Imm);
869           break;
870         case RISCVOp::OPERAND_UIMM12:
871           Ok = isUInt<12>(Imm);
872           break;
873         case RISCVOp::OPERAND_SIMM12:
874           Ok = isInt<12>(Imm);
875           break;
876         case RISCVOp::OPERAND_UIMM20:
877           Ok = isUInt<20>(Imm);
878           break;
879         case RISCVOp::OPERAND_UIMMLOG2XLEN:
880           if (STI.getTargetTriple().isArch64Bit())
881             Ok = isUInt<6>(Imm);
882           else
883             Ok = isUInt<5>(Imm);
884           break;
885         }
886         if (!Ok) {
887           ErrInfo = "Invalid immediate";
888           return false;
889         }
890       }
891     }
892   }
893 
894   return true;
895 }
896 
897 // Return true if get the base operand, byte offset of an instruction and the
898 // memory width. Width is the size of memory that is being loaded/stored.
899 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
900     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
901     unsigned &Width, const TargetRegisterInfo *TRI) const {
902   if (!LdSt.mayLoadOrStore())
903     return false;
904 
905   // Here we assume the standard RISC-V ISA, which uses a base+offset
906   // addressing mode. You'll need to relax these conditions to support custom
907   // load/stores instructions.
908   if (LdSt.getNumExplicitOperands() != 3)
909     return false;
910   if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
911     return false;
912 
913   if (!LdSt.hasOneMemOperand())
914     return false;
915 
916   Width = (*LdSt.memoperands_begin())->getSize();
917   BaseReg = &LdSt.getOperand(1);
918   Offset = LdSt.getOperand(2).getImm();
919   return true;
920 }
921 
922 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
923     const MachineInstr &MIa, const MachineInstr &MIb) const {
924   assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
925   assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
926 
927   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
928       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
929     return false;
930 
931   // Retrieve the base register, offset from the base register and width. Width
932   // is the size of memory that is being loaded/stored (e.g. 1, 2, 4).  If
933   // base registers are identical, and the offset of a lower memory access +
934   // the width doesn't overlap the offset of a higher memory access,
935   // then the memory accesses are different.
936   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
937   const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
938   int64_t OffsetA = 0, OffsetB = 0;
939   unsigned int WidthA = 0, WidthB = 0;
940   if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
941       getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
942     if (BaseOpA->isIdenticalTo(*BaseOpB)) {
943       int LowOffset = std::min(OffsetA, OffsetB);
944       int HighOffset = std::max(OffsetA, OffsetB);
945       int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
946       if (LowOffset + LowWidth <= HighOffset)
947         return true;
948     }
949   }
950   return false;
951 }
952 
953 std::pair<unsigned, unsigned>
954 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
955   const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
956   return std::make_pair(TF & Mask, TF & ~Mask);
957 }
958 
959 ArrayRef<std::pair<unsigned, const char *>>
960 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
961   using namespace RISCVII;
962   static const std::pair<unsigned, const char *> TargetFlags[] = {
963       {MO_CALL, "riscv-call"},
964       {MO_PLT, "riscv-plt"},
965       {MO_LO, "riscv-lo"},
966       {MO_HI, "riscv-hi"},
967       {MO_PCREL_LO, "riscv-pcrel-lo"},
968       {MO_PCREL_HI, "riscv-pcrel-hi"},
969       {MO_GOT_HI, "riscv-got-hi"},
970       {MO_TPREL_LO, "riscv-tprel-lo"},
971       {MO_TPREL_HI, "riscv-tprel-hi"},
972       {MO_TPREL_ADD, "riscv-tprel-add"},
973       {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
974       {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
975   return makeArrayRef(TargetFlags);
976 }
977 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
978     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
979   const Function &F = MF.getFunction();
980 
981   // Can F be deduplicated by the linker? If it can, don't outline from it.
982   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
983     return false;
984 
985   // Don't outline from functions with section markings; the program could
986   // expect that all the code is in the named section.
987   if (F.hasSection())
988     return false;
989 
990   // It's safe to outline from MF.
991   return true;
992 }
993 
994 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
995                                             unsigned &Flags) const {
996   // More accurate safety checking is done in getOutliningCandidateInfo.
997   return true;
998 }
999 
1000 // Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault // Call via "call t0, <fn>"; frame returns "jr t0".
};
1004 
1005 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
1006     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1007 
1008   // First we need to filter out candidates where the X5 register (IE t0) can't
1009   // be used to setup the function call.
1010   auto CannotInsertCall = [](outliner::Candidate &C) {
1011     const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1012 
1013     C.initLRU(*TRI);
1014     LiveRegUnits LRU = C.LRU;
1015     return !LRU.available(RISCV::X5);
1016   };
1017 
1018   llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1019 
1020   // If the sequence doesn't have enough candidates left, then we're done.
1021   if (RepeatedSequenceLocs.size() < 2)
1022     return outliner::OutlinedFunction();
1023 
1024   unsigned SequenceSize = 0;
1025 
1026   auto I = RepeatedSequenceLocs[0].front();
1027   auto E = std::next(RepeatedSequenceLocs[0].back());
1028   for (; I != E; ++I)
1029     SequenceSize += getInstSizeInBytes(*I);
1030 
1031   // call t0, function = 8 bytes.
1032   unsigned CallOverhead = 8;
1033   for (auto &C : RepeatedSequenceLocs)
1034     C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1035 
1036   // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1037   unsigned FrameOverhead = 4;
1038   if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1039           .getFeatureBits()[RISCV::FeatureStdExtC])
1040     FrameOverhead = 2;
1041 
1042   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1043                                     FrameOverhead, MachineOutlinerDefault);
1044 }
1045 
// Classify a single instruction for the outliner: Legal (may be outlined),
// Illegal (blocks outlining of the sequence), or Invisible (ignored by the
// analysis). NOTE: the order of these checks matters — e.g. CFI instructions
// must be classified before the generic meta-instruction check.
outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      return outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // We need support for tail calls to outlined functions before return
  // statements can be allowed.
  if (MI.isReturn())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register which we use for return addresses for
  // these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
      return outliner::InstrType::Illegal;

  // Don't allow instructions which won't be materialized to impact outlining
  // analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}
1094 
1095 void RISCVInstrInfo::buildOutlinedFrame(
1096     MachineBasicBlock &MBB, MachineFunction &MF,
1097     const outliner::OutlinedFunction &OF) const {
1098 
1099   // Strip out any CFI instructions
1100   bool Changed = true;
1101   while (Changed) {
1102     Changed = false;
1103     auto I = MBB.begin();
1104     auto E = MBB.end();
1105     for (; I != E; ++I) {
1106       if (I->isCFIInstruction()) {
1107         I->removeFromParent();
1108         Changed = true;
1109         break;
1110       }
1111     }
1112   }
1113 
1114   MBB.addLiveIn(RISCV::X5);
1115 
1116   // Add in a return instruction to the end of the outlined frame.
1117   MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1118       .addReg(RISCV::X0, RegState::Define)
1119       .addReg(RISCV::X5)
1120       .addImm(0));
1121 }
1122 
1123 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
1124     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
1125     MachineFunction &MF, const outliner::Candidate &C) const {
1126 
1127   // Add in a call instruction to the outlined function at the given location.
1128   It = MBB.insert(It,
1129                   BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1130                       .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1131                                         RISCVII::MO_CALL));
1132   return It;
1133 }
1134 
1135 // clang-format off
1136 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
1137   RISCV::PseudoV##OP##_##TYPE##_##LMUL##_COMMUTABLE
1138 
1139 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
1140   CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
1141   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                 \
1142   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                 \
1143   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                  \
1144   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
1145   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
1146   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1147 
1148 #define CASE_VFMA_SPLATS(OP)                                                   \
1149   CASE_VFMA_OPCODE_LMULS(OP, VF16):                                            \
1150   case CASE_VFMA_OPCODE_LMULS(OP, VF32):                                       \
1151   case CASE_VFMA_OPCODE_LMULS(OP, VF64)
1152 // clang-format on
1153 
// Find a pair of commutable operand indices for the vector FMA pseudos.
// SrcOpIdx1/SrcOpIdx2 may come in fixed or as CommuteAnyOperandIndex; on
// success they hold the chosen pair. Splat forms and the *ACC/*SAC VV forms
// can only swap operands 1 and 3 (via an opcode change done elsewhere);
// the *ADD/*SUB VV forms may also commute the two multiplicands.
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case CASE_VFMA_SPLATS(FMADD):
  case CASE_VFMA_SPLATS(FMSUB):
  case CASE_VFMA_SPLATS(FMACC):
  case CASE_VFMA_SPLATS(FMSAC):
  case CASE_VFMA_SPLATS(FNMADD):
  case CASE_VFMA_SPLATS(FNMSUB):
  case CASE_VFMA_SPLATS(FNMACC):
  case CASE_VFMA_SPLATS(FNMSAC):
  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
    // For these instructions we have more freedom. We can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable
    // regardless of the FMA opcode. The FMA opcode is adjusted later if
    // needed.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of operands to be commuted is not specified and
      // this method is free to choose appropriate commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both of operands are not fixed. Set one of commutable
        // operands to the tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommutableOpIdx1) {
        // Only one of the operands is not fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Let's choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers.
        // Otherwise, the commute transformation does not change anything and
        // is useless. We use this as a hint to make our decision.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      // Assign the found pair of commutable indices to SrcOpIdx1 and
      // SrcOpIdx2 to return those values.
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }

    return true;
  }
  }

  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
1249 
1250 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
1251   case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_COMMUTABLE:                   \
1252     Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_COMMUTABLE;                \
1253     break;
1254 
1255 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
1256   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
1257   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
1258   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
1259   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
1260   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
1261   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
1262   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1263 
1264 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
1265   CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16)                            \
1266   CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32)                            \
1267   CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
1268 
// Perform the actual commutation chosen by findCommutedOpIndices. For the
// vector FMA pseudos, swapping the tied source (operand 1) with the addend
// (operand 3) requires switching between the *ADD/*SUB and *ACC/*SAC opcode
// variants; the operand swap itself is delegated to the base implementation.
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  // Work on a clone when NewMI is set, otherwise mutate MI in place.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case CASE_VFMA_SPLATS(FMACC):
  case CASE_VFMA_SPLATS(FMADD):
  case CASE_VFMA_SPLATS(FMSAC):
  case CASE_VFMA_SPLATS(FMSUB):
  case CASE_VFMA_SPLATS(FNMACC):
  case CASE_VFMA_SPLATS(FNMADD):
  case CASE_VFMA_SPLATS(FNMSAC):
  case CASE_VFMA_SPLATS(FNMSUB):
  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
    // It only makes sense to toggle these between clobbering the
    // addend/subtrahend/minuend and clobbering one of the multiplicands.
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FMSAC, FMSUB, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FNMACC, FNMADD, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSAC, FNMSUB, VV)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend we need to change opcode.
    // Otherwise we're just swapping 2 of the multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
        default:
          llvm_unreachable("Unexpected opcode");
        CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
        CASE_VFMA_CHANGE_OPCODE_LMULS(FMSUB, FMSAC, VV)
        CASE_VFMA_CHANGE_OPCODE_LMULS(FNMADD, FNMACC, VV)
        CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSUB, FNMSAC, VV)
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    // Let the default code handle it.
    break;
  }
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
1349 
1350 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1351 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1352 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1353 #undef CASE_VFMA_SPLATS
1354 #undef CASE_VFMA_OPCODE_LMULS
1355 #undef CASE_VFMA_OPCODE_COMMON
1356 
1357 // clang-format off
1358 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
1359   RISCV::PseudoV##OP##_##LMUL##_TIED
1360 
1361 #define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
1362   CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
1363   case CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                     \
1364   case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
1365   case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
1366   case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
1367   case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1368 // clang-format on
1369 
1370 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
1371   case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
1372     NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
1373     break;
1374 
1375 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
1376   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
1377   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
1378   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
1379   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
1380   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
1381   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1382 
// Convert a tied-operand widening pseudo (PseudoV*_TIED) into its untied
// three-address form, rebuilding the instruction with the non-TIED opcode.
// Returns the new instruction, or nullptr if MI is not convertible.
MachineInstr *RISCVInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MBB, MachineInstr &MI, LiveVariables *LV) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case CASE_WIDEOP_OPCODE_LMULS(FWADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(FWSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }
    // clang-format on

    // Rebuild with the same five explicit operands, then carry over any
    // implicit operands from the original instruction.
    MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
                                  .add(MI.getOperand(0))
                                  .add(MI.getOperand(1))
                                  .add(MI.getOperand(2))
                                  .add(MI.getOperand(3))
                                  .add(MI.getOperand(4));
    MIB.copyImplicitOps(MI);

    // Transfer kill bookkeeping to the new instruction so LiveVariables
    // stays consistent.
    if (LV) {
      unsigned NumOps = MI.getNumOperands();
      for (unsigned I = 1; I < NumOps; ++I) {
        MachineOperand &Op = MI.getOperand(I);
        if (Op.isReg() && Op.isKill())
          LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
      }
    }

    return MIB;
  }
  }

  return nullptr;
}
1431 
1432 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1433 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1434 #undef CASE_WIDEOP_OPCODE_LMULS
1435 #undef CASE_WIDEOP_OPCODE_COMMON
1436 
// Emit code that computes (Amount / 8) * VLENB into a fresh virtual GPR and
// return that register. Amount is a byte count that must be a positive
// multiple of 8 (one vector register is VLENB bytes). Multiplication by the
// vector-register count is strength-reduced to shifts/adds where possible;
// a real MUL (requiring the M extension) is the fallback.
Register RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator II,
                                               const DebugLoc &DL,
                                               int64_t Amount) const {
  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
  assert(Amount % 8 == 0 &&
         "Reserve the stack by the multiple of one vector size.");

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  int64_t NumOfVReg = Amount / 8;

  // VL := VLENB (bytes per vector register).
  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  assert(isInt<12>(NumOfVReg) &&
         "Expect the number of vector registers within 12-bits.");
  if (isPowerOf2_32(NumOfVReg)) {
    // Power of two: a single shift (or nothing for a factor of 1).
    uint32_t ShiftAmount = Log2_32(NumOfVReg);
    if (ShiftAmount == 0)
      return VL;
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL, RegState::Kill)
        .addImm(ShiftAmount);
  } else if (isPowerOf2_32(NumOfVReg - 1)) {
    // 2^k + 1: VL = (VL << k) + VL.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
        .addReg(VL)
        .addImm(ShiftAmount);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADD), VL)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(VL, RegState::Kill);
  } else if (isPowerOf2_32(NumOfVReg + 1)) {
    // 2^k - 1: VL = (VL << k) - VL.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
        .addReg(VL)
        .addImm(ShiftAmount);
    BuildMI(MBB, II, DL, TII->get(RISCV::SUB), VL)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(VL, RegState::Kill);
  } else {
    // General case: materialize the factor and multiply. MUL needs the M
    // extension, so diagnose its absence instead of emitting bad code.
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), N)
        .addReg(RISCV::X0)
        .addImm(NumOfVReg);
    if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtM())
      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
          MF.getFunction(),
          "M-extension must be enabled to calculate the vscaled size/offset."});
    BuildMI(MBB, II, DL, TII->get(RISCV::MUL), VL)
        .addReg(VL, RegState::Kill)
        .addReg(N, RegState::Kill);
  }

  return VL;
}
1495 
1496 static bool isRVVWholeLoadStore(unsigned Opcode) {
1497   switch (Opcode) {
1498   default:
1499     return false;
1500   case RISCV::VS1R_V:
1501   case RISCV::VS2R_V:
1502   case RISCV::VS4R_V:
1503   case RISCV::VS8R_V:
1504   case RISCV::VL1RE8_V:
1505   case RISCV::VL2RE8_V:
1506   case RISCV::VL4RE8_V:
1507   case RISCV::VL8RE8_V:
1508   case RISCV::VL1RE16_V:
1509   case RISCV::VL2RE16_V:
1510   case RISCV::VL4RE16_V:
1511   case RISCV::VL8RE16_V:
1512   case RISCV::VL1RE32_V:
1513   case RISCV::VL2RE32_V:
1514   case RISCV::VL4RE32_V:
1515   case RISCV::VL8RE32_V:
1516   case RISCV::VL1RE64_V:
1517   case RISCV::VL2RE64_V:
1518   case RISCV::VL4RE64_V:
1519   case RISCV::VL8RE64_V:
1520     return true;
1521   }
1522 }
1523 
1524 bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
1525   // RVV lacks any support for immediate addressing for stack addresses, so be
1526   // conservative.
1527   unsigned Opcode = MI.getOpcode();
1528   if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1529       !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1530     return false;
1531   return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1532     return MO.isFI();
1533   });
1534 }
1535 
1536 Optional<std::pair<unsigned, unsigned>>
1537 RISCVInstrInfo::isRVVSpillForZvlsseg(unsigned Opcode) const {
1538   switch (Opcode) {
1539   default:
1540     return None;
1541   case RISCV::PseudoVSPILL2_M1:
1542   case RISCV::PseudoVRELOAD2_M1:
1543     return std::make_pair(2u, 1u);
1544   case RISCV::PseudoVSPILL2_M2:
1545   case RISCV::PseudoVRELOAD2_M2:
1546     return std::make_pair(2u, 2u);
1547   case RISCV::PseudoVSPILL2_M4:
1548   case RISCV::PseudoVRELOAD2_M4:
1549     return std::make_pair(2u, 4u);
1550   case RISCV::PseudoVSPILL3_M1:
1551   case RISCV::PseudoVRELOAD3_M1:
1552     return std::make_pair(3u, 1u);
1553   case RISCV::PseudoVSPILL3_M2:
1554   case RISCV::PseudoVRELOAD3_M2:
1555     return std::make_pair(3u, 2u);
1556   case RISCV::PseudoVSPILL4_M1:
1557   case RISCV::PseudoVRELOAD4_M1:
1558     return std::make_pair(4u, 1u);
1559   case RISCV::PseudoVSPILL4_M2:
1560   case RISCV::PseudoVRELOAD4_M2:
1561     return std::make_pair(4u, 2u);
1562   case RISCV::PseudoVSPILL5_M1:
1563   case RISCV::PseudoVRELOAD5_M1:
1564     return std::make_pair(5u, 1u);
1565   case RISCV::PseudoVSPILL6_M1:
1566   case RISCV::PseudoVRELOAD6_M1:
1567     return std::make_pair(6u, 1u);
1568   case RISCV::PseudoVSPILL7_M1:
1569   case RISCV::PseudoVRELOAD7_M1:
1570     return std::make_pair(7u, 1u);
1571   case RISCV::PseudoVSPILL8_M1:
1572   case RISCV::PseudoVRELOAD8_M1:
1573     return std::make_pair(8u, 1u);
1574   }
1575 }
1576