1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
14 #include "RISCV.h"
15 #include "RISCVSubtarget.h"
16 #include "RISCVTargetMachine.h"
17 #include "Utils/RISCVMatInt.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/MachineFunctionPass.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/RegisterScavenging.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/TargetRegistry.h"
26 
27 using namespace llvm;
28 
29 #define GEN_CHECK_COMPRESS_INSTR
30 #include "RISCVGenCompressInstEmitter.inc"
31 
32 #define GET_INSTRINFO_CTOR_DTOR
33 #include "RISCVGenInstrInfo.inc"
34 
// Construct the RISCV instruction info, registering the call-frame setup and
// destroy pseudo opcodes with the TableGen'erated base class.
RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}
38 
39 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
40                                              int &FrameIndex) const {
41   switch (MI.getOpcode()) {
42   default:
43     return 0;
44   case RISCV::LB:
45   case RISCV::LBU:
46   case RISCV::LH:
47   case RISCV::LHU:
48   case RISCV::LW:
49   case RISCV::FLW:
50   case RISCV::LWU:
51   case RISCV::LD:
52   case RISCV::FLD:
53     break;
54   }
55 
56   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
57       MI.getOperand(2).getImm() == 0) {
58     FrameIndex = MI.getOperand(1).getIndex();
59     return MI.getOperand(0).getReg();
60   }
61 
62   return 0;
63 }
64 
65 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
66                                             int &FrameIndex) const {
67   switch (MI.getOpcode()) {
68   default:
69     return 0;
70   case RISCV::SB:
71   case RISCV::SH:
72   case RISCV::SW:
73   case RISCV::FSW:
74   case RISCV::SD:
75   case RISCV::FSD:
76     break;
77   }
78 
79   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
80       MI.getOperand(2).getImm() == 0) {
81     FrameIndex = MI.getOperand(1).getIndex();
82     return MI.getOperand(0).getReg();
83   }
84 
85   return 0;
86 }
87 
88 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
89                                  MachineBasicBlock::iterator MBBI,
90                                  const DebugLoc &DL, MCRegister DstReg,
91                                  MCRegister SrcReg, bool KillSrc) const {
92   if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
93     BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
94         .addReg(SrcReg, getKillRegState(KillSrc))
95         .addImm(0);
96     return;
97   }
98 
99   // FPR->FPR copies
100   unsigned Opc;
101   if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
102     Opc = RISCV::FSGNJ_S;
103   else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
104     Opc = RISCV::FSGNJ_D;
105   else
106     llvm_unreachable("Impossible reg-to-reg copy");
107 
108   BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
109       .addReg(SrcReg, getKillRegState(KillSrc))
110       .addReg(SrcReg, getKillRegState(KillSrc));
111 }
112 
113 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
114                                          MachineBasicBlock::iterator I,
115                                          Register SrcReg, bool IsKill, int FI,
116                                          const TargetRegisterClass *RC,
117                                          const TargetRegisterInfo *TRI) const {
118   DebugLoc DL;
119   if (I != MBB.end())
120     DL = I->getDebugLoc();
121 
122   MachineFunction *MF = MBB.getParent();
123   const MachineFrameInfo &MFI = MF->getFrameInfo();
124   MachineMemOperand *MMO = MF->getMachineMemOperand(
125       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
126       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
127 
128   unsigned Opcode;
129   if (RISCV::GPRRegClass.hasSubClassEq(RC))
130     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
131              RISCV::SW : RISCV::SD;
132   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
133     Opcode = RISCV::FSW;
134   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
135     Opcode = RISCV::FSD;
136   else
137     llvm_unreachable("Can't store this register to stack slot");
138 
139   BuildMI(MBB, I, DL, get(Opcode))
140       .addReg(SrcReg, getKillRegState(IsKill))
141       .addFrameIndex(FI)
142       .addImm(0)
143       .addMemOperand(MMO);
144 }
145 
146 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
147                                           MachineBasicBlock::iterator I,
148                                           Register DstReg, int FI,
149                                           const TargetRegisterClass *RC,
150                                           const TargetRegisterInfo *TRI) const {
151   DebugLoc DL;
152   if (I != MBB.end())
153     DL = I->getDebugLoc();
154 
155   MachineFunction *MF = MBB.getParent();
156   const MachineFrameInfo &MFI = MF->getFrameInfo();
157   MachineMemOperand *MMO = MF->getMachineMemOperand(
158       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
159       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
160 
161   unsigned Opcode;
162   if (RISCV::GPRRegClass.hasSubClassEq(RC))
163     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
164              RISCV::LW : RISCV::LD;
165   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
166     Opcode = RISCV::FLW;
167   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
168     Opcode = RISCV::FLD;
169   else
170     llvm_unreachable("Can't load this register from stack slot");
171 
172   BuildMI(MBB, I, DL, get(Opcode), DstReg)
173     .addFrameIndex(FI)
174     .addImm(0)
175     .addMemOperand(MMO);
176 }
177 
// Materialize the constant Val into DstReg by emitting the instruction
// sequence computed by RISCVMatInt, inserting before MBBI. Intermediate
// results go through fresh virtual registers; every emitted instruction is
// tagged with Flag (e.g. FrameSetup).
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  // SrcReg feeds each non-LUI instruction. It starts as X0 and is rebound to
  // the previous instruction's result after each step, chaining the sequence.
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0; // Number of sequence instructions emitted so far.

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(Seq.size() > 0);

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      // LUI takes no register source, only the immediate.
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}
216 
217 // The contents of values added to Cond are not examined outside of
218 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
219 // push BranchOpcode, Reg1, Reg2.
// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
//
// Conditional branches here have the operand layout (rs1, rs2, target), so
// operand 2 is the destination block and operands 0/1 are the compared
// registers.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  // Cond[0] is the opcode as an immediate; Cond[1]/Cond[2] are the registers.
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}
230 
231 static unsigned getOppositeBranchOpcode(int Opc) {
232   switch (Opc) {
233   default:
234     llvm_unreachable("Unrecognized conditional branch");
235   case RISCV::BEQ:
236     return RISCV::BNE;
237   case RISCV::BNE:
238     return RISCV::BEQ;
239   case RISCV::BLT:
240     return RISCV::BGE;
241   case RISCV::BGE:
242     return RISCV::BLT;
243   case RISCV::BLTU:
244     return RISCV::BGEU;
245   case RISCV::BGEU:
246     return RISCV::BLTU;
247   }
248 }
249 
// Analyze the terminators of MBB into the standard TBB/FBB/Cond form used by
// the branch folder. Returns false (i.e. "understood") for: a fall-through
// block, a single unconditional branch, a single conditional branch, or a
// conditional branch followed by an unconditional branch. Returns true for
// anything else (indirect branches, more than two terminators). When
// AllowModify is set, dead terminators following the first unconditional or
// indirect branch are erased as a side effect.
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  // Walk backwards over the terminator run at the end of the block.
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    // Continue the analysis from the (now last) unconditional/indirect branch.
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
317 
318 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
319                                       int *BytesRemoved) const {
320   if (BytesRemoved)
321     *BytesRemoved = 0;
322   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
323   if (I == MBB.end())
324     return 0;
325 
326   if (!I->getDesc().isUnconditionalBranch() &&
327       !I->getDesc().isConditionalBranch())
328     return 0;
329 
330   // Remove the branch.
331   if (BytesRemoved)
332     *BytesRemoved += getInstSizeInBytes(*I);
333   I->eraseFromParent();
334 
335   I = MBB.end();
336 
337   if (I == MBB.begin())
338     return 1;
339   --I;
340   if (!I->getDesc().isConditionalBranch())
341     return 1;
342 
343   // Remove the branch.
344   if (BytesRemoved)
345     *BytesRemoved += getInstSizeInBytes(*I);
346   I->eraseFromParent();
347   return 2;
348 }
349 
350 // Inserts a branch into the end of the specific MachineBasicBlock, returning
351 // the number of instructions inserted.
352 unsigned RISCVInstrInfo::insertBranch(
353     MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
354     ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
355   if (BytesAdded)
356     *BytesAdded = 0;
357 
358   // Shouldn't be a fall through.
359   assert(TBB && "insertBranch must not be told to insert a fallthrough");
360   assert((Cond.size() == 3 || Cond.size() == 0) &&
361          "RISCV branch conditions have two components!");
362 
363   // Unconditional branch.
364   if (Cond.empty()) {
365     MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
366     if (BytesAdded)
367       *BytesAdded += getInstSizeInBytes(MI);
368     return 1;
369   }
370 
371   // Either a one or two-way conditional branch.
372   unsigned Opc = Cond[0].getImm();
373   MachineInstr &CondMI =
374       *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
375   if (BytesAdded)
376     *BytesAdded += getInstSizeInBytes(CondMI);
377 
378   // One-way conditional branch.
379   if (!FBB)
380     return 1;
381 
382   // Two-way conditional branch.
383   MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
384   if (BytesAdded)
385     *BytesAdded += getInstSizeInBytes(MI);
386   return 2;
387 }
388 
// Insert an unconditional branch to DestBB that can span a larger range than
// a plain JAL, into the (freshly created, empty) block MBB. Used by branch
// relaxation. Returns the byte size of the emitted sequence.
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  // PseudoJump expands to AUIPC+JALR and clobbers its scratch register, which
  // is dead after the jump.
  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  // Now that the block is non-empty, scavenge a real GPR for the scratch
  // register and replace the temporary virtual register with it.
  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                MI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  // AUIPC+JALR = 8 bytes (matches getInstSizeInBytes for PseudoJump).
  return 8;
}
424 
// Invert a condition produced by parseCondBranch/analyzeBranch by swapping
// the branch opcode stored in Cond[0] for its complement. Returns false to
// indicate the condition was successfully reversed.
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}
431 
432 MachineBasicBlock *
433 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
434   assert(MI.getDesc().isBranch() && "Unexpected opcode!");
435   // The branch target is always the last operand.
436   int NumOp = MI.getNumExplicitOperands();
437   return MI.getOperand(NumOp - 1).getMBB();
438 }
439 
// Return true if a branch of kind BranchOp can reach a target BrOffset bytes
// away without relaxation.
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    // B-format: 12-bit signed immediate in multiples of 2 => 13-bit range.
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    // J-format: 20-bit signed immediate in multiples of 2 => 21-bit range.
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // Expands to an AUIPC+JALR pair; the +0x800 accounts for the lo12 half
    // being sign-extended when the offset is split into hi20/lo12 parts.
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  }
}
463 
// Return the encoded size of MI in bytes, accounting for compressible
// instructions, zero-size meta instructions, pseudo expansions, and inline
// assembly.
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: {
    // If the instruction is attached to a function we can check whether the
    // C extension would let it be emitted as a 16-bit compressed form.
    if (MI.getParent() && MI.getParent()->getParent()) {
      const auto MF = MI.getMF();
      const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
      const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
      const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
      const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
      if (isCompressibleInst(MI, &ST, MRI, STI))
        return 2;
    }
    // Otherwise fall back to the size recorded in the instruction descriptor.
    return get(Opcode).getSize();
  }
  // Meta instructions occupy no space in the output.
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  // These values are determined based on RISCVExpandAtomicPseudoInsts,
  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
  // pseudos are expanded.
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoJump:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
    return 8;
  case RISCV::PseudoAtomicLoadNand32:
  case RISCV::PseudoAtomicLoadNand64:
    return 20;
  case RISCV::PseudoMaskedAtomicSwap32:
  case RISCV::PseudoMaskedAtomicLoadAdd32:
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return 28;
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return 32;
  case RISCV::PseudoMaskedAtomicLoadMax32:
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return 44;
  case RISCV::PseudoMaskedAtomicLoadUMax32:
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return 36;
  case RISCV::PseudoCmpXchg32:
  case RISCV::PseudoCmpXchg64:
    return 16;
  case RISCV::PseudoMaskedCmpXchg32:
    return 32;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    // Inline assembly is measured by parsing the asm string itself.
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  }
}
526 
527 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
528   const unsigned Opcode = MI.getOpcode();
529   switch (Opcode) {
530   default:
531     break;
532   case RISCV::FSGNJ_D:
533   case RISCV::FSGNJ_S:
534     // The canonical floating-point move is fsgnj rd, rs, rs.
535     return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
536            MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
537   case RISCV::ADDI:
538   case RISCV::ORI:
539   case RISCV::XORI:
540     return (MI.getOperand(1).isReg() &&
541             MI.getOperand(1).getReg() == RISCV::X0) ||
542            (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
543   }
544   return MI.isAsCheapAsAMove();
545 }
546 
// If MI is effectively a register move — a generic move, "addi rd, rs, 0",
// or the canonical FP move "fsgnj rd, rs, rs" — return its destination and
// source operands; otherwise return None.
Optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    // Operand 1 can be a frameindex but callers expect registers
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return None;
}
570 
// Machine-verifier hook: check that each RISCV-specific immediate operand of
// MI fits the range declared by its operand type. On failure, sets ErrInfo
// and returns false.
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    // Only operand types in the RISCV immediate range are validated here;
    // registers and generic operands are handled elsewhere.
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      // The operand may not yet be a concrete immediate (e.g. a symbol
      // before lowering); only immediates can be range-checked.
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");
        case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM5:
          Ok = isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM12:
          Ok = isUInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM13_LSB0:
          // Branch offsets: 12-bit signed value scaled by 2.
          Ok = isShiftedInt<12, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM20:
          Ok = isUInt<20>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM21_LSB0:
          // Jump offsets: 20-bit signed value scaled by 2.
          Ok = isShiftedInt<20, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          // Shift amounts: 6 bits on RV64, 5 bits on RV32.
          if (STI.getTargetTriple().isArch64Bit())
            Ok = isUInt<6>(Imm);
          else
            Ok = isUInt<5>(Imm);
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  return true;
}
625 
626 // Return true if get the base operand, byte offset of an instruction and the
627 // memory width. Width is the size of memory that is being loaded/stored.
628 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
629     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
630     unsigned &Width, const TargetRegisterInfo *TRI) const {
631   if (!LdSt.mayLoadOrStore())
632     return false;
633 
634   // Here we assume the standard RISC-V ISA, which uses a base+offset
635   // addressing mode. You'll need to relax these conditions to support custom
636   // load/stores instructions.
637   if (LdSt.getNumExplicitOperands() != 3)
638     return false;
639   if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
640     return false;
641 
642   if (!LdSt.hasOneMemOperand())
643     return false;
644 
645   Width = (*LdSt.memoperands_begin())->getSize();
646   BaseReg = &LdSt.getOperand(1);
647   Offset = LdSt.getOperand(2).getImm();
648   return true;
649 }
650 
651 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
652     const MachineInstr &MIa, const MachineInstr &MIb) const {
653   assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
654   assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
655 
656   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
657       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
658     return false;
659 
660   // Retrieve the base register, offset from the base register and width. Width
661   // is the size of memory that is being loaded/stored (e.g. 1, 2, 4).  If
662   // base registers are identical, and the offset of a lower memory access +
663   // the width doesn't overlap the offset of a higher memory access,
664   // then the memory accesses are different.
665   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
666   const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
667   int64_t OffsetA = 0, OffsetB = 0;
668   unsigned int WidthA = 0, WidthB = 0;
669   if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
670       getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
671     if (BaseOpA->isIdenticalTo(*BaseOpB)) {
672       int LowOffset = std::min(OffsetA, OffsetB);
673       int HighOffset = std::max(OffsetA, OffsetB);
674       int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
675       if (LowOffset + LowWidth <= HighOffset)
676         return true;
677     }
678   }
679   return false;
680 }
681 
682 std::pair<unsigned, unsigned>
683 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
684   const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
685   return std::make_pair(TF & Mask, TF & ~Mask);
686 }
687 
// Map each direct target-operand flag to the name used when (de)serializing
// MIR, so flags survive a print/parse round trip.
ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(TargetFlags);
}
706 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
707     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
708   const Function &F = MF.getFunction();
709 
710   // Can F be deduplicated by the linker? If it can, don't outline from it.
711   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
712     return false;
713 
714   // Don't outline from functions with section markings; the program could
715   // expect that all the code is in the named section.
716   if (F.hasSection())
717     return false;
718 
719   // It's safe to outline from MF.
720   return true;
721 }
722 
// Conservatively report every block as outline-safe; the per-instruction and
// per-candidate filtering happens in getOutliningType and
// getOutliningCandidateInfo.
bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return true;
}
728 
// Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  // Single strategy: call via "call t0, <fn>", return via "jr t0"
  // (see insertOutlinedCall / buildOutlinedFrame below).
  MachineOutlinerDefault
};
733 
734 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
735     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
736 
737   // First we need to filter out candidates where the X5 register (IE t0) can't
738   // be used to setup the function call.
739   auto CannotInsertCall = [](outliner::Candidate &C) {
740     const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
741 
742     C.initLRU(*TRI);
743     LiveRegUnits LRU = C.LRU;
744     return !LRU.available(RISCV::X5);
745   };
746 
747   RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
748                                             RepeatedSequenceLocs.end(),
749                                             CannotInsertCall),
750                              RepeatedSequenceLocs.end());
751 
752   // If the sequence doesn't have enough candidates left, then we're done.
753   if (RepeatedSequenceLocs.size() < 2)
754     return outliner::OutlinedFunction();
755 
756   unsigned SequenceSize = 0;
757 
758   auto I = RepeatedSequenceLocs[0].front();
759   auto E = std::next(RepeatedSequenceLocs[0].back());
760   for (; I != E; ++I)
761     SequenceSize += getInstSizeInBytes(*I);
762 
763   // call t0, function = 8 bytes.
764   unsigned CallOverhead = 8;
765   for (auto &C : RepeatedSequenceLocs)
766     C.setCallInfo(MachineOutlinerDefault, CallOverhead);
767 
768   // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
769   unsigned FrameOverhead = 4;
770   if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
771           .getFeatureBits()[RISCV::FeatureStdExtC])
772     FrameOverhead = 2;
773 
774   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
775                                     FrameOverhead, MachineOutlinerDefault);
776 }
777 
// Classify a single instruction for the MachineOutliner: Legal (may be
// outlined), Invisible (ignored when matching sequences), or Illegal (ends
// any candidate sequence). Note the check ordering matters: positions are
// classified before the generic meta-instruction check so CFI gets its
// special handling.
outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      return outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // We need support for tail calls to outlined functions before return
  // statements can be allowed.
  if (MI.isReturn())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register which we use for return addresses for
  // these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  if (const auto &MO : MI.operands())
    ;
  // (see loop below — operands referencing MBBs, block addresses, or constant
  // pool entries would not resolve inside the outlined function)
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
      return outliner::InstrType::Illegal;

  // Don't allow instructions which won't be materialized to impact outlining
  // analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}
826 
827 void RISCVInstrInfo::buildOutlinedFrame(
828     MachineBasicBlock &MBB, MachineFunction &MF,
829     const outliner::OutlinedFunction &OF) const {
830 
831   // Strip out any CFI instructions
832   bool Changed = true;
833   while (Changed) {
834     Changed = false;
835     auto I = MBB.begin();
836     auto E = MBB.end();
837     for (; I != E; ++I) {
838       if (I->isCFIInstruction()) {
839         I->removeFromParent();
840         Changed = true;
841         break;
842       }
843     }
844   }
845 
846   MBB.addLiveIn(RISCV::X5);
847 
848   // Add in a return instruction to the end of the outlined frame.
849   MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
850       .addReg(RISCV::X0, RegState::Define)
851       .addReg(RISCV::X5)
852       .addImm(0));
853 }
854 
855 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
856     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
857     MachineFunction &MF, const outliner::Candidate &C) const {
858 
859   // Add in a call instruction to the outlined function at the given location.
860   It = MBB.insert(It,
861                   BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
862                       .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
863                                         RISCVII::MO_CALL));
864   return It;
865 }
866