//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVInstrInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#include "RISCVGenInstrInfo.inc"

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
  case RISCV::LH:
  case RISCV::LHU:
  case RISCV::LW:
  case RISCV::FLW:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLD:
    break;
  }

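  // Loads use operand 1 as the base and operand 2 as the simm12 offset; only a
  // frame-index base with a zero offset is a direct load from the stack slot.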
  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::FSW:
  case RISCV::SD:
  case RISCV::FSD:
    break;
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies
  unsigned Opc;
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_S;
  else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_D;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

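  // fsgnj rd, rs, rs leaves the value unchanged (it copies rs's own sign back
  // onto itself), so it acts as the canonical FPR move; fmv.s/fmv.d are
  // assembler aliases for exactly this form.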
  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addReg(SrcReg, getKillRegState(KillSrc));
}

void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

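  // GPRs are XLEN bits wide, so spill with SW on RV32 and SD on RV64; FPR32
  // and FPR64 values use the matching floating-point store.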
  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSD;
  else
    llvm_unreachable("Can't store this register to stack slot");

  BuildMI(MBB, I, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(IsKill))
      .addFrameIndex(FI)
      .addImm(0);
}

void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLD;
  else
    llvm_unreachable("Can't load this register from stack slot");

  BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
}

void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0;

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

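  // RISCVMatInt computes a short LUI/ADDI(W)/SLLI sequence that materializes
  // Val, each instruction consuming the previous result. For example, on RV32
  // the constant 0x12345678 becomes: lui rd, 0x12345 ; addi rd, rd, 0x678.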
  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(Seq.size() > 0);

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

static unsigned getOppositeBranchOpcode(int Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCV::BEQ:
    return RISCV::BNE;
  case RISCV::BNE:
    return RISCV::BEQ;
  case RISCV::BLT:
    return RISCV::BGE;
  case RISCV::BGE:
    return RISCV::BLT;
  case RISCV::BLTU:
    return RISCV::BGEU;
  case RISCV::BGEU:
    return RISCV::BLTU;
  }
}

bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBr.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}

unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

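  // If a conditional branch remains just before the one we erased, this block
  // ended in a two-way branch; remove that one as well.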
  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch at the end of the given MachineBasicBlock, returning the
// number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISCV branch conditions have three components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one or two-way conditional branch.
  unsigned Opc = Cond[0].getImm();
  MachineInstr &CondMI =
      *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}

unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());

  if (TM.isPositionIndependent())
    report_fatal_error("Unable to insert indirect branch");

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

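  // Emit "lui scratch, %hi(DestBB); jalr zero, %lo(DestBB)(scratch)", i.e. two
  // 4-byte instructions, which is why this hook reports a size of 8 bytes.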
  MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
                             .addMBB(&DestBB, RISCVII::MO_HI);
  BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
      .addReg(ScratchReg, RegState::Kill)
      .addMBB(&DestBB, RISCVII::MO_LO);

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                LuiMI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}

MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
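    // B-type branches encode a 13-bit signed, 2-byte-aligned offset (+/-4 KiB).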
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
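    // JAL (and PseudoBR, which expands to it) encodes a 21-bit signed,
    // 2-byte-aligned offset (+/-1 MiB).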
    return isIntN(21, BrOffset);
  }
}

unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: {
    if (MI.getParent() && MI.getParent()->getParent()) {
      const auto MF = MI.getMF();
      const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
      const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
      const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
      const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
      if (isCompressibleInst(MI, &ST, MRI, STI))
        return 2;
    }
    return get(Opcode).getSize();
  }
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoJump:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
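    // Each of these pseudos expands to a two-instruction AUIPC-based sequence,
    // i.e. 8 bytes.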
    return 8;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  }
}

bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
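    // With X0 as the source these instructions just materialize an immediate,
    // which is as cheap as a register move.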
    return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
  }
  return MI.isAsCheapAsAMove();
}

bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");
        case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM5:
          Ok = isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM12:
          Ok = isUInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM13_LSB0:
          Ok = isShiftedInt<12, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM20:
          Ok = isUInt<20>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM21_LSB0:
          Ok = isShiftedInt<20, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          if (STI.getTargetTriple().isArch64Bit())
            Ok = isUInt<6>(Imm);
          else
            Ok = isUInt<5>(Imm);
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  return true;
}

// Return true if we can determine the base operand and byte offset of an
// instruction, along with the memory width. Width is the size of memory that
// is being loaded/stored.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    unsigned &Width, const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  // Here we assume the standard RISC-V ISA, which uses a base+offset
  // addressing mode. You'll need to relax these conditions to support custom
  // load/store instructions.
  if (LdSt.getNumExplicitOperands() != 3)
    return false;
  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
    return false;

  if (!LdSt.hasOneMemOperand())
    return false;

  Width = (*LdSt.memoperands_begin())->getSize();
  BaseReg = &LdSt.getOperand(1);
  Offset = LdSt.getOperand(2).getImm();
  return true;
}

bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4).  If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(TargetFlags);
}

bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  if (F.hasSection())
    return false;

  // It's safe to outline from MF.
  return true;
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return true;
}

// Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault
};

outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {

  // First we need to filter out candidates where the X5 register (i.e. t0)
  // can't be used to set up the function call.
  auto CannotInsertCall = [](outliner::Candidate &C) {
    const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();

    C.initLRU(*TRI);
    LiveRegUnits LRU = C.LRU;
    return !LRU.available(RISCV::X5);
  };

  RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
                                            RepeatedSequenceLocs.end(),
                                            CannotInsertCall),
                             RepeatedSequenceLocs.end());

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < 2)
    return outliner::OutlinedFunction();

  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  for (; I != E; ++I)
    SequenceSize += getInstSizeInBytes(*I);

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
          .getFeatureBits()[RISCV::FeatureStdExtC])
    FrameOverhead = 2;

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    FrameOverhead, MachineOutlinerDefault);
}

outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      return outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // We need support for tail calls to outlined functions before return
  // statements can be allowed.
  if (MI.isReturn())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register, which we use for return addresses
  // in these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
      return outliner::InstrType::Illegal;

  // Don't allow instructions which won't be materialized to impact outlining
  // analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}

void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {

  // Strip out any CFI instructions
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }

  MBB.addLiveIn(RISCV::X5);

  // Add in a return instruction to the end of the outlined frame.
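  // JALR X0, X5, 0 is "jr t0": it returns through the address the outlined
  // call left in t0 without touching the caller's return address in ra.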
  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
      .addReg(RISCV::X0, RegState::Define)
      .addReg(RISCV::X5)
      .addImm(0));
}

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Add in a call instruction to the outlined function at the given location.
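  // PseudoCALLReg expands to AUIPC + JALR with X5 (t0) as the link register,
  // so the outlined function can return with "jr t0".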
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
}