1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/IR/IntrinsicsRISCV.h"
19 #include "llvm/Support/Alignment.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/KnownBits.h"
22 #include "llvm/Support/MathExtras.h"
23 #include "llvm/Support/raw_ostream.h"
24 
25 using namespace llvm;
26 
27 #define DEBUG_TYPE "riscv-isel"
28 
29 namespace llvm {
30 namespace RISCV {
31 #define GET_RISCVVSSEGTable_IMPL
32 #define GET_RISCVVLSEGTable_IMPL
33 #define GET_RISCVVLXSEGTable_IMPL
34 #define GET_RISCVVSXSEGTable_IMPL
35 #define GET_RISCVVLETable_IMPL
36 #define GET_RISCVVSETable_IMPL
37 #define GET_RISCVVLXTable_IMPL
38 #define GET_RISCVVSXTable_IMPL
39 #include "RISCVGenSearchableTables.inc"
40 } // namespace RISCV
41 } // namespace llvm
42 
// Run target-specific cleanups on the DAG after instruction selection.
// Currently the only post-processing step is a peephole that attempts to
// fold an ADDI (base + immediate) into the offset field of a load/store
// (see doPeepholeLoadStoreADDI).
void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}
46 
47 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
48                          MVT XLenVT) {
49   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64);
50 
51   SDNode *Result = nullptr;
52   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
53   for (RISCVMatInt::Inst &Inst : Seq) {
54     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
55     if (Inst.Opc == RISCV::LUI)
56       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
57     else
58       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
59 
60     // Only the first instruction has X0 as its source.
61     SrcReg = SDValue(Result, 0);
62   }
63 
64   return Result;
65 }
66 
67 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
68                                unsigned RegClassID, unsigned SubReg0) {
69   assert(Regs.size() >= 2 && Regs.size() <= 8);
70 
71   SDLoc DL(Regs[0]);
72   SmallVector<SDValue, 8> Ops;
73 
74   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
75 
76   for (unsigned I = 0; I < Regs.size(); ++I) {
77     Ops.push_back(Regs[I]);
78     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
79   }
80   SDNode *N =
81       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
82   return SDValue(N, 0);
83 }
84 
85 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
86                              unsigned NF) {
87   static const unsigned RegClassIDs[] = {
88       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
89       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
90       RISCV::VRN8M1RegClassID};
91 
92   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
93 }
94 
95 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
96                              unsigned NF) {
97   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
98                                          RISCV::VRN3M2RegClassID,
99                                          RISCV::VRN4M2RegClassID};
100 
101   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
102 }
103 
104 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
105                              unsigned NF) {
106   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
107                          RISCV::sub_vrm4_0);
108 }
109 
110 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
111                            unsigned NF, RISCVVLMUL LMUL) {
112   switch (LMUL) {
113   default:
114     llvm_unreachable("Invalid LMUL.");
115   case RISCVVLMUL::LMUL_F8:
116   case RISCVVLMUL::LMUL_F4:
117   case RISCVVLMUL::LMUL_F2:
118   case RISCVVLMUL::LMUL_1:
119     return createM1Tuple(CurDAG, Regs, NF);
120   case RISCVVLMUL::LMUL_2:
121     return createM2Tuple(CurDAG, Regs, NF);
122   case RISCVVLMUL::LMUL_4:
123     return createM4Tuple(CurDAG, Regs, NF);
124   }
125 }
126 
// Append the trailing operands shared by the RVV load/store pseudo
// instructions to Operands, consuming source operands from Node starting at
// operand index CurOp. The produced order is: base pointer,
// [stride/index], [mask register V0], VL, SEW, chain, [glue]. This order
// must match the pseudo instruction definitions, so callers must not
// reorder Operands afterwards. If IndexVT is non-null it receives the type
// of the stride/index operand, for callers that need the index LMUL/SEW.
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0: the masked pseudos read the mask from
    // the physical register V0. The glue ties the CopyToReg to this node so
    // nothing can clobber V0 in between.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
  Operands.push_back(SEW);

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
163 
// Select a segment load intrinsic (vlseg<nf>/vlsseg<nf>) to the matching
// VLSEG pseudo instruction. The pseudo produces one tuple register; its
// pieces are extracted via subregisters to replace the NF scalar results of
// the intrinsic node.
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Results: NF vector values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned ScalarSize = VT.getScalarSizeInBits();
  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain and operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // The masked form carries NF masked-off (passthru) operands; pack them
    // into a single tuple register operand for the pseudo.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Preserve the memory operand so later passes keep alias information.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // The pseudo returns one tuple register; peel off each field as a
  // subregister to replace the intrinsic's individual results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  // Result NF of the original node is the chain.
  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
204 
// Select a fault-only-first segment load intrinsic (vlseg<nf>e<eew>ff) to
// the FF VLSEG pseudo. In addition to the NF vector results and the chain,
// the intrinsic returns the VL actually loaded, which is read back from the
// vl CSR via PseudoReadVL glued to the load.
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned ScalarSize = VT.getScalarSizeInBits();
  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain and operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    // Pack the NF masked-off (passthru) operands into one tuple register.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            ScalarSize, static_cast<unsigned>(LMUL));
  // The pseudo produces glue so PseudoReadVL can read vl immediately after
  // the load, before anything can change it.
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  // Preserve the memory operand so later passes keep alias information.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Extract each field of the tuple result via subregisters.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}
248 
// Select an indexed segment load intrinsic (vloxseg<nf>/vluxseg<nf>) to the
// matching VLXSEG pseudo. The pseudo is chosen by both the data LMUL/SEW
// and the index vector's LMUL/SEW, which may differ.
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Results: NF vector values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned ScalarSize = VT.getScalarSizeInBits();
  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain and operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // Pack the NF masked-off (passthru) operands into one tuple register.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  // Capture the index operand's type to derive the index SEW/LMUL below.
  MVT IndexVT;
  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Preserve the memory operand so later passes keep alias information.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Extract each field of the tuple result via subregisters.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  // Result NF of the original node is the chain.
  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
295 
296 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
297                                     bool IsStrided) {
298   SDLoc DL(Node);
299   unsigned NF = Node->getNumOperands() - 4;
300   if (IsStrided)
301     NF--;
302   if (IsMasked)
303     NF--;
304   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
305   unsigned ScalarSize = VT.getScalarSizeInBits();
306   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
307   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
308   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
309 
310   SmallVector<SDValue, 8> Operands;
311   Operands.push_back(StoreVal);
312   unsigned CurOp = 2 + NF;
313 
314   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
315                              Operands);
316 
317   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
318       NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
319   MachineSDNode *Store =
320       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
321 
322   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
323     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
324 
325   ReplaceNode(Node, Store);
326 }
327 
328 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
329                                      bool IsOrdered) {
330   SDLoc DL(Node);
331   unsigned NF = Node->getNumOperands() - 5;
332   if (IsMasked)
333     --NF;
334   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
335   unsigned ScalarSize = VT.getScalarSizeInBits();
336   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
337   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
338   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
339 
340   SmallVector<SDValue, 8> Operands;
341   Operands.push_back(StoreVal);
342   unsigned CurOp = 2 + NF;
343 
344   MVT IndexVT;
345   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
346                              /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
347 
348   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
349          "Element count mismatch");
350 
351   RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
352   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
353   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
354       NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
355       static_cast<unsigned>(IndexLMUL));
356   MachineSDNode *Store =
357       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
358 
359   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
360     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
361 
362   ReplaceNode(Node, Store);
363 }
364 
365 
366 void RISCVDAGToDAGISel::Select(SDNode *Node) {
367   // If we have a custom node, we have already selected.
368   if (Node->isMachineOpcode()) {
369     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
370     Node->setNodeId(-1);
371     return;
372   }
373 
374   // Instruction Selection not handled by the auto-generated tablegen selection
375   // should be handled here.
376   unsigned Opcode = Node->getOpcode();
377   MVT XLenVT = Subtarget->getXLenVT();
378   SDLoc DL(Node);
379   MVT VT = Node->getSimpleValueType(0);
380 
381   switch (Opcode) {
382   case ISD::Constant: {
383     auto *ConstNode = cast<ConstantSDNode>(Node);
384     if (VT == XLenVT && ConstNode->isNullValue()) {
385       SDValue New =
386           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
387       ReplaceNode(Node, New.getNode());
388       return;
389     }
390     ReplaceNode(Node, selectImm(CurDAG, DL, ConstNode->getSExtValue(), XLenVT));
391     return;
392   }
393   case ISD::FrameIndex: {
394     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
395     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
396     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
397     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
398     return;
399   }
400   case ISD::SRL: {
401     // We don't need this transform if zext.h is supported.
402     if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
403       break;
404     // Optimize (srl (and X, 0xffff), C) ->
405     //          (srli (slli X, (XLen-16), (XLen-16) + C)
406     // Taking into account that the 0xffff may have had lower bits unset by
407     // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
408     // This pattern occurs when type legalizing i16 right shifts.
409     // FIXME: This could be extended to other AND masks.
410     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
411     if (N1C) {
412       uint64_t ShAmt = N1C->getZExtValue();
413       SDValue N0 = Node->getOperand(0);
414       if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
415           isa<ConstantSDNode>(N0.getOperand(1))) {
416         uint64_t Mask = N0.getConstantOperandVal(1);
417         Mask |= maskTrailingOnes<uint64_t>(ShAmt);
418         if (Mask == 0xffff) {
419           unsigned LShAmt = Subtarget->getXLen() - 16;
420           SDNode *SLLI =
421               CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
422                                      CurDAG->getTargetConstant(LShAmt, DL, VT));
423           SDNode *SRLI = CurDAG->getMachineNode(
424               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
425               CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
426           ReplaceNode(Node, SRLI);
427           return;
428         }
429       }
430     }
431 
432     break;
433   }
434   case ISD::INTRINSIC_WO_CHAIN: {
435     unsigned IntNo = Node->getConstantOperandVal(0);
436     switch (IntNo) {
437       // By default we do not custom select any intrinsic.
438     default:
439       break;
440     case Intrinsic::riscv_vmsgeu:
441     case Intrinsic::riscv_vmsge: {
442       SDValue Src1 = Node->getOperand(1);
443       SDValue Src2 = Node->getOperand(2);
444       // Only custom select scalar second operand.
445       if (Src2.getValueType() != XLenVT)
446         break;
447       // Small constants are handled with patterns.
448       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
449         int64_t CVal = C->getSExtValue();
450         if (CVal >= -15 && CVal <= 16)
451           break;
452       }
453       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
454       MVT Src1VT = Src1.getSimpleValueType();
455       unsigned VMSLTOpcode, VMNANDOpcode;
456       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
457       default:
458         llvm_unreachable("Unexpected LMUL!");
459       case RISCVVLMUL::LMUL_F8:
460         VMSLTOpcode =
461             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
462         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
463         break;
464       case RISCVVLMUL::LMUL_F4:
465         VMSLTOpcode =
466             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
467         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
468         break;
469       case RISCVVLMUL::LMUL_F2:
470         VMSLTOpcode =
471             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
472         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
473         break;
474       case RISCVVLMUL::LMUL_1:
475         VMSLTOpcode =
476             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
477         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
478         break;
479       case RISCVVLMUL::LMUL_2:
480         VMSLTOpcode =
481             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
482         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
483         break;
484       case RISCVVLMUL::LMUL_4:
485         VMSLTOpcode =
486             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
487         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
488         break;
489       case RISCVVLMUL::LMUL_8:
490         VMSLTOpcode =
491             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
492         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
493         break;
494       }
495       SDValue SEW =
496           CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
497       SDValue VL;
498       selectVLOp(Node->getOperand(3), VL);
499 
500       // Expand to
501       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
502       SDValue Cmp = SDValue(
503           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
504           0);
505       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
506                                                {Cmp, Cmp, VL, SEW}));
507       return;
508     }
509     case Intrinsic::riscv_vmsgeu_mask:
510     case Intrinsic::riscv_vmsge_mask: {
511       SDValue Src1 = Node->getOperand(2);
512       SDValue Src2 = Node->getOperand(3);
513       // Only custom select scalar second operand.
514       if (Src2.getValueType() != XLenVT)
515         break;
516       // Small constants are handled with patterns.
517       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
518         int64_t CVal = C->getSExtValue();
519         if (CVal >= -15 && CVal <= 16)
520           break;
521       }
522       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
523       MVT Src1VT = Src1.getSimpleValueType();
524       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
525       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
526       default:
527         llvm_unreachable("Unexpected LMUL!");
528       case RISCVVLMUL::LMUL_F8:
529         VMSLTOpcode =
530             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
531         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
532                                      : RISCV::PseudoVMSLT_VX_MF8_MASK;
533         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
534         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
535         break;
536       case RISCVVLMUL::LMUL_F4:
537         VMSLTOpcode =
538             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
539         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
540                                      : RISCV::PseudoVMSLT_VX_MF4_MASK;
541         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
542         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
543         break;
544       case RISCVVLMUL::LMUL_F2:
545         VMSLTOpcode =
546             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
547         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
548                                      : RISCV::PseudoVMSLT_VX_MF2_MASK;
549         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
550         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
551         break;
552       case RISCVVLMUL::LMUL_1:
553         VMSLTOpcode =
554             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
555         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
556                                      : RISCV::PseudoVMSLT_VX_M1_MASK;
557         VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
558         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
559         break;
560       case RISCVVLMUL::LMUL_2:
561         VMSLTOpcode =
562             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
563         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
564                                      : RISCV::PseudoVMSLT_VX_M2_MASK;
565         VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
566         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
567         break;
568       case RISCVVLMUL::LMUL_4:
569         VMSLTOpcode =
570             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
571         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
572                                      : RISCV::PseudoVMSLT_VX_M4_MASK;
573         VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
574         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
575         break;
576       case RISCVVLMUL::LMUL_8:
577         VMSLTOpcode =
578             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
579         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
580                                      : RISCV::PseudoVMSLT_VX_M8_MASK;
581         VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
582         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
583         break;
584       }
585       SDValue SEW =
586           CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
587       SDValue VL;
588       selectVLOp(Node->getOperand(5), VL);
589       SDValue MaskedOff = Node->getOperand(1);
590       SDValue Mask = Node->getOperand(4);
591       // If the MaskedOff value and the Mask are the same value use
592       // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
593       // This avoids needing to copy v0 to vd before starting the next sequence.
594       if (Mask == MaskedOff) {
595         SDValue Cmp = SDValue(
596             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
597             0);
598         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
599                                                  {Mask, Cmp, VL, SEW}));
600         return;
601       }
602 
603       // Otherwise use
604       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
605       SDValue Cmp = SDValue(
606           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
607                                  {MaskedOff, Src1, Src2, Mask, VL, SEW}),
608           0);
609       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
610                                                {Cmp, Mask, VL, SEW}));
611       return;
612     }
613     }
614     break;
615   }
616   case ISD::INTRINSIC_W_CHAIN: {
617     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
618     switch (IntNo) {
619       // By default we do not custom select any intrinsic.
620     default:
621       break;
622 
623     case Intrinsic::riscv_vsetvli:
624     case Intrinsic::riscv_vsetvlimax: {
625       if (!Subtarget->hasStdExtV())
626         break;
627 
628       bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
629       unsigned Offset = VLMax ? 2 : 3;
630 
631       assert(Node->getNumOperands() == Offset + 2 &&
632              "Unexpected number of operands");
633 
634       RISCVVSEW VSEW =
635           static_cast<RISCVVSEW>(Node->getConstantOperandVal(Offset) & 0x7);
636       RISCVVLMUL VLMul = static_cast<RISCVVLMUL>(
637           Node->getConstantOperandVal(Offset + 1) & 0x7);
638 
639       unsigned VTypeI = RISCVVType::encodeVTYPE(
640           VLMul, VSEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
641       SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
642 
643       SDValue VLOperand;
644       if (VLMax) {
645         VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
646       } else {
647         VLOperand = Node->getOperand(2);
648 
649         if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
650           uint64_t AVL = C->getZExtValue();
651           if (isUInt<5>(AVL)) {
652             SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
653             ReplaceNode(
654                 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
655                                              MVT::Other, VLImm, VTypeIOp,
656                                              /* Chain */ Node->getOperand(0)));
657             return;
658           }
659         }
660       }
661 
662       ReplaceNode(Node,
663                   CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
664                                          MVT::Other, VLOperand, VTypeIOp,
665                                          /* Chain */ Node->getOperand(0)));
666       return;
667     }
668     case Intrinsic::riscv_vlseg2:
669     case Intrinsic::riscv_vlseg3:
670     case Intrinsic::riscv_vlseg4:
671     case Intrinsic::riscv_vlseg5:
672     case Intrinsic::riscv_vlseg6:
673     case Intrinsic::riscv_vlseg7:
674     case Intrinsic::riscv_vlseg8: {
675       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
676       return;
677     }
678     case Intrinsic::riscv_vlseg2_mask:
679     case Intrinsic::riscv_vlseg3_mask:
680     case Intrinsic::riscv_vlseg4_mask:
681     case Intrinsic::riscv_vlseg5_mask:
682     case Intrinsic::riscv_vlseg6_mask:
683     case Intrinsic::riscv_vlseg7_mask:
684     case Intrinsic::riscv_vlseg8_mask: {
685       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
686       return;
687     }
688     case Intrinsic::riscv_vlsseg2:
689     case Intrinsic::riscv_vlsseg3:
690     case Intrinsic::riscv_vlsseg4:
691     case Intrinsic::riscv_vlsseg5:
692     case Intrinsic::riscv_vlsseg6:
693     case Intrinsic::riscv_vlsseg7:
694     case Intrinsic::riscv_vlsseg8: {
695       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
696       return;
697     }
698     case Intrinsic::riscv_vlsseg2_mask:
699     case Intrinsic::riscv_vlsseg3_mask:
700     case Intrinsic::riscv_vlsseg4_mask:
701     case Intrinsic::riscv_vlsseg5_mask:
702     case Intrinsic::riscv_vlsseg6_mask:
703     case Intrinsic::riscv_vlsseg7_mask:
704     case Intrinsic::riscv_vlsseg8_mask: {
705       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
706       return;
707     }
708     case Intrinsic::riscv_vloxseg2:
709     case Intrinsic::riscv_vloxseg3:
710     case Intrinsic::riscv_vloxseg4:
711     case Intrinsic::riscv_vloxseg5:
712     case Intrinsic::riscv_vloxseg6:
713     case Intrinsic::riscv_vloxseg7:
714     case Intrinsic::riscv_vloxseg8:
715       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
716       return;
717     case Intrinsic::riscv_vluxseg2:
718     case Intrinsic::riscv_vluxseg3:
719     case Intrinsic::riscv_vluxseg4:
720     case Intrinsic::riscv_vluxseg5:
721     case Intrinsic::riscv_vluxseg6:
722     case Intrinsic::riscv_vluxseg7:
723     case Intrinsic::riscv_vluxseg8:
724       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
725       return;
726     case Intrinsic::riscv_vloxseg2_mask:
727     case Intrinsic::riscv_vloxseg3_mask:
728     case Intrinsic::riscv_vloxseg4_mask:
729     case Intrinsic::riscv_vloxseg5_mask:
730     case Intrinsic::riscv_vloxseg6_mask:
731     case Intrinsic::riscv_vloxseg7_mask:
732     case Intrinsic::riscv_vloxseg8_mask:
733       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
734       return;
735     case Intrinsic::riscv_vluxseg2_mask:
736     case Intrinsic::riscv_vluxseg3_mask:
737     case Intrinsic::riscv_vluxseg4_mask:
738     case Intrinsic::riscv_vluxseg5_mask:
739     case Intrinsic::riscv_vluxseg6_mask:
740     case Intrinsic::riscv_vluxseg7_mask:
741     case Intrinsic::riscv_vluxseg8_mask:
742       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
743       return;
744     case Intrinsic::riscv_vlseg8ff:
745     case Intrinsic::riscv_vlseg7ff:
746     case Intrinsic::riscv_vlseg6ff:
747     case Intrinsic::riscv_vlseg5ff:
748     case Intrinsic::riscv_vlseg4ff:
749     case Intrinsic::riscv_vlseg3ff:
750     case Intrinsic::riscv_vlseg2ff: {
751       selectVLSEGFF(Node, /*IsMasked*/ false);
752       return;
753     }
754     case Intrinsic::riscv_vlseg8ff_mask:
755     case Intrinsic::riscv_vlseg7ff_mask:
756     case Intrinsic::riscv_vlseg6ff_mask:
757     case Intrinsic::riscv_vlseg5ff_mask:
758     case Intrinsic::riscv_vlseg4ff_mask:
759     case Intrinsic::riscv_vlseg3ff_mask:
760     case Intrinsic::riscv_vlseg2ff_mask: {
761       selectVLSEGFF(Node, /*IsMasked*/ true);
762       return;
763     }
764     case Intrinsic::riscv_vloxei:
765     case Intrinsic::riscv_vloxei_mask:
766     case Intrinsic::riscv_vluxei:
767     case Intrinsic::riscv_vluxei_mask: {
768       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
769                       IntNo == Intrinsic::riscv_vluxei_mask;
770       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
771                        IntNo == Intrinsic::riscv_vloxei_mask;
772 
773       MVT VT = Node->getSimpleValueType(0);
774       unsigned ScalarSize = VT.getScalarSizeInBits();
775 
776       unsigned CurOp = 2;
777       SmallVector<SDValue, 8> Operands;
778       if (IsMasked)
779         Operands.push_back(Node->getOperand(CurOp++));
780 
781       MVT IndexVT;
782       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
783                                  /*IsStridedOrIndexed*/ true, Operands,
784                                  &IndexVT);
785 
786       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
787              "Element count mismatch");
788 
789       RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
790       RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
791       unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
792       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
793           IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
794           static_cast<unsigned>(IndexLMUL));
795       MachineSDNode *Load =
796           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
797 
798       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
799         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
800 
801       ReplaceNode(Node, Load);
802       return;
803     }
804     case Intrinsic::riscv_vle1:
805     case Intrinsic::riscv_vle:
806     case Intrinsic::riscv_vle_mask:
807     case Intrinsic::riscv_vlse:
808     case Intrinsic::riscv_vlse_mask: {
809       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
810                       IntNo == Intrinsic::riscv_vlse_mask;
811       bool IsStrided =
812           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
813 
814       MVT VT = Node->getSimpleValueType(0);
815       unsigned ScalarSize = VT.getScalarSizeInBits();
816       // VLE1 uses an SEW of 8.
817       unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
818 
819       unsigned CurOp = 2;
820       SmallVector<SDValue, 8> Operands;
821       if (IsMasked)
822         Operands.push_back(Node->getOperand(CurOp++));
823 
824       addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
825                                  Operands);
826 
827       RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
828       const RISCV::VLEPseudo *P =
829           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
830                               static_cast<unsigned>(LMUL));
831       MachineSDNode *Load =
832           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
833 
834       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
835         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
836 
837       ReplaceNode(Node, Load);
838       return;
839     }
840     case Intrinsic::riscv_vleff:
841     case Intrinsic::riscv_vleff_mask: {
842       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
843 
844       MVT VT = Node->getSimpleValueType(0);
845       unsigned ScalarSize = VT.getScalarSizeInBits();
846 
847       unsigned CurOp = 2;
848       SmallVector<SDValue, 7> Operands;
849       if (IsMasked)
850         Operands.push_back(Node->getOperand(CurOp++));
851 
852       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
853                                  /*IsStridedOrIndexed*/ false, Operands);
854 
855       RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
856       const RISCV::VLEPseudo *P =
857           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
858                               ScalarSize, static_cast<unsigned>(LMUL));
859       MachineSDNode *Load =
860           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
861                                  MVT::Other, MVT::Glue, Operands);
862       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
863                                               /*Glue*/ SDValue(Load, 2));
864 
865       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
866         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
867 
868       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
869       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
870       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
871       CurDAG->RemoveDeadNode(Node);
872       return;
873     }
874     }
875     break;
876   }
877   case ISD::INTRINSIC_VOID: {
878     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
879     switch (IntNo) {
880     case Intrinsic::riscv_vsseg2:
881     case Intrinsic::riscv_vsseg3:
882     case Intrinsic::riscv_vsseg4:
883     case Intrinsic::riscv_vsseg5:
884     case Intrinsic::riscv_vsseg6:
885     case Intrinsic::riscv_vsseg7:
886     case Intrinsic::riscv_vsseg8: {
887       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
888       return;
889     }
890     case Intrinsic::riscv_vsseg2_mask:
891     case Intrinsic::riscv_vsseg3_mask:
892     case Intrinsic::riscv_vsseg4_mask:
893     case Intrinsic::riscv_vsseg5_mask:
894     case Intrinsic::riscv_vsseg6_mask:
895     case Intrinsic::riscv_vsseg7_mask:
896     case Intrinsic::riscv_vsseg8_mask: {
897       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
898       return;
899     }
900     case Intrinsic::riscv_vssseg2:
901     case Intrinsic::riscv_vssseg3:
902     case Intrinsic::riscv_vssseg4:
903     case Intrinsic::riscv_vssseg5:
904     case Intrinsic::riscv_vssseg6:
905     case Intrinsic::riscv_vssseg7:
906     case Intrinsic::riscv_vssseg8: {
907       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
908       return;
909     }
910     case Intrinsic::riscv_vssseg2_mask:
911     case Intrinsic::riscv_vssseg3_mask:
912     case Intrinsic::riscv_vssseg4_mask:
913     case Intrinsic::riscv_vssseg5_mask:
914     case Intrinsic::riscv_vssseg6_mask:
915     case Intrinsic::riscv_vssseg7_mask:
916     case Intrinsic::riscv_vssseg8_mask: {
917       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
918       return;
919     }
920     case Intrinsic::riscv_vsoxseg2:
921     case Intrinsic::riscv_vsoxseg3:
922     case Intrinsic::riscv_vsoxseg4:
923     case Intrinsic::riscv_vsoxseg5:
924     case Intrinsic::riscv_vsoxseg6:
925     case Intrinsic::riscv_vsoxseg7:
926     case Intrinsic::riscv_vsoxseg8:
927       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
928       return;
929     case Intrinsic::riscv_vsuxseg2:
930     case Intrinsic::riscv_vsuxseg3:
931     case Intrinsic::riscv_vsuxseg4:
932     case Intrinsic::riscv_vsuxseg5:
933     case Intrinsic::riscv_vsuxseg6:
934     case Intrinsic::riscv_vsuxseg7:
935     case Intrinsic::riscv_vsuxseg8:
936       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
937       return;
938     case Intrinsic::riscv_vsoxseg2_mask:
939     case Intrinsic::riscv_vsoxseg3_mask:
940     case Intrinsic::riscv_vsoxseg4_mask:
941     case Intrinsic::riscv_vsoxseg5_mask:
942     case Intrinsic::riscv_vsoxseg6_mask:
943     case Intrinsic::riscv_vsoxseg7_mask:
944     case Intrinsic::riscv_vsoxseg8_mask:
945       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
946       return;
947     case Intrinsic::riscv_vsuxseg2_mask:
948     case Intrinsic::riscv_vsuxseg3_mask:
949     case Intrinsic::riscv_vsuxseg4_mask:
950     case Intrinsic::riscv_vsuxseg5_mask:
951     case Intrinsic::riscv_vsuxseg6_mask:
952     case Intrinsic::riscv_vsuxseg7_mask:
953     case Intrinsic::riscv_vsuxseg8_mask:
954       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
955       return;
956     case Intrinsic::riscv_vsoxei:
957     case Intrinsic::riscv_vsoxei_mask:
958     case Intrinsic::riscv_vsuxei:
959     case Intrinsic::riscv_vsuxei_mask: {
960       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
961                       IntNo == Intrinsic::riscv_vsuxei_mask;
962       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
963                        IntNo == Intrinsic::riscv_vsoxei_mask;
964 
965       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
966       unsigned ScalarSize = VT.getScalarSizeInBits();
967 
968       unsigned CurOp = 2;
969       SmallVector<SDValue, 8> Operands;
970       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
971 
972       MVT IndexVT;
973       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
974                                  /*IsStridedOrIndexed*/ true, Operands,
975                                  &IndexVT);
976 
977       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
978              "Element count mismatch");
979 
980       RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
981       RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
982       unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
983       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
984           IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
985           static_cast<unsigned>(IndexLMUL));
986       MachineSDNode *Store =
987           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
988 
989       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
990         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
991 
992       ReplaceNode(Node, Store);
993       return;
994     }
995     case Intrinsic::riscv_vse1:
996     case Intrinsic::riscv_vse:
997     case Intrinsic::riscv_vse_mask:
998     case Intrinsic::riscv_vsse:
999     case Intrinsic::riscv_vsse_mask: {
1000       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1001                       IntNo == Intrinsic::riscv_vsse_mask;
1002       bool IsStrided =
1003           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1004 
1005       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1006       unsigned ScalarSize = VT.getScalarSizeInBits();
1007       // VSE1 uses an SEW of 8.
1008       unsigned SEWImm = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
1009 
1010       unsigned CurOp = 2;
1011       SmallVector<SDValue, 8> Operands;
1012       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1013 
1014       addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
1015                                  Operands);
1016 
1017       RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1018       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1019           IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
1020       MachineSDNode *Store =
1021           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1022       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1023         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1024 
1025       ReplaceNode(Node, Store);
1026       return;
1027     }
1028     }
1029     break;
1030   }
1031   case ISD::BITCAST: {
1032     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1033     // Just drop bitcasts between vectors if both are fixed or both are
1034     // scalable.
1035     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1036         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1037       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1038       CurDAG->RemoveDeadNode(Node);
1039       return;
1040     }
1041     break;
1042   }
1043   case ISD::INSERT_SUBVECTOR: {
1044     SDValue V = Node->getOperand(0);
1045     SDValue SubV = Node->getOperand(1);
1046     SDLoc DL(SubV);
1047     auto Idx = Node->getConstantOperandVal(2);
1048     MVT SubVecVT = SubV.getSimpleValueType();
1049 
1050     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1051     MVT SubVecContainerVT = SubVecVT;
1052     // Establish the correct scalable-vector types for any fixed-length type.
1053     if (SubVecVT.isFixedLengthVector())
1054       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1055     if (VT.isFixedLengthVector())
1056       VT = TLI.getContainerForFixedLengthVector(VT);
1057 
1058     const auto *TRI = Subtarget->getRegisterInfo();
1059     unsigned SubRegIdx;
1060     std::tie(SubRegIdx, Idx) =
1061         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1062             VT, SubVecContainerVT, Idx, TRI);
1063 
1064     // If the Idx hasn't been completely eliminated then this is a subvector
1065     // insert which doesn't naturally align to a vector register. These must
1066     // be handled using instructions to manipulate the vector registers.
1067     if (Idx != 0)
1068       break;
1069 
1070     RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1071     bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
1072                            SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
1073                            SubVecLMUL == RISCVVLMUL::LMUL_F8;
1074     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1075     assert((!IsSubVecPartReg || V.isUndef()) &&
1076            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1077            "the subvector is smaller than a full-sized register");
1078 
1079     // If we haven't set a SubRegIdx, then we must be going between
1080     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1081     if (SubRegIdx == RISCV::NoSubRegister) {
1082       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1083       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1084                  InRegClassID &&
1085              "Unexpected subvector extraction");
1086       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1087       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1088                                                DL, VT, SubV, RC);
1089       ReplaceNode(Node, NewNode);
1090       return;
1091     }
1092 
1093     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1094     ReplaceNode(Node, Insert.getNode());
1095     return;
1096   }
1097   case ISD::EXTRACT_SUBVECTOR: {
1098     SDValue V = Node->getOperand(0);
1099     auto Idx = Node->getConstantOperandVal(1);
1100     MVT InVT = V.getSimpleValueType();
1101     SDLoc DL(V);
1102 
1103     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1104     MVT SubVecContainerVT = VT;
1105     // Establish the correct scalable-vector types for any fixed-length type.
1106     if (VT.isFixedLengthVector())
1107       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1108     if (InVT.isFixedLengthVector())
1109       InVT = TLI.getContainerForFixedLengthVector(InVT);
1110 
1111     const auto *TRI = Subtarget->getRegisterInfo();
1112     unsigned SubRegIdx;
1113     std::tie(SubRegIdx, Idx) =
1114         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1115             InVT, SubVecContainerVT, Idx, TRI);
1116 
1117     // If the Idx hasn't been completely eliminated then this is a subvector
1118     // extract which doesn't naturally align to a vector register. These must
1119     // be handled using instructions to manipulate the vector registers.
1120     if (Idx != 0)
1121       break;
1122 
1123     // If we haven't set a SubRegIdx, then we must be going between
1124     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1125     if (SubRegIdx == RISCV::NoSubRegister) {
1126       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1127       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1128                  InRegClassID &&
1129              "Unexpected subvector extraction");
1130       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1131       SDNode *NewNode =
1132           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1133       ReplaceNode(Node, NewNode);
1134       return;
1135     }
1136 
1137     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1138     ReplaceNode(Node, Extract.getNode());
1139     return;
1140   }
1141   case RISCVISD::VMV_V_X_VL:
1142   case RISCVISD::VFMV_V_F_VL: {
1143     // Try to match splat of a scalar load to a strided load with stride of x0.
1144     SDValue Src = Node->getOperand(0);
1145     auto *Ld = dyn_cast<LoadSDNode>(Src);
1146     if (!Ld)
1147       break;
1148     EVT MemVT = Ld->getMemoryVT();
1149     // The memory VT should be the same size as the element type.
1150     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1151       break;
1152     if (!IsProfitableToFold(Src, Node, Node) ||
1153         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1154       break;
1155 
1156     SDValue VL;
1157     selectVLOp(Node->getOperand(1), VL);
1158 
1159     unsigned ScalarSize = VT.getScalarSizeInBits();
1160     SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
1161 
1162     SDValue Operands[] = {Ld->getBasePtr(),
1163                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1164                           Ld->getChain()};
1165 
1166     RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1167     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1168         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, ScalarSize,
1169         static_cast<unsigned>(LMUL));
1170     MachineSDNode *Load =
1171         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1172 
1173     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1174       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1175 
1176     ReplaceNode(Node, Load);
1177     return;
1178   }
1179   }
1180 
1181   // Select the default instruction.
1182   SelectCode(Node);
1183 }
1184 
1185 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1186     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1187   switch (ConstraintID) {
1188   case InlineAsm::Constraint_m:
1189     // We just support simple memory operands that have a single address
1190     // operand and need no special handling.
1191     OutOps.push_back(Op);
1192     return false;
1193   case InlineAsm::Constraint_A:
1194     OutOps.push_back(Op);
1195     return false;
1196   default:
1197     break;
1198   }
1199 
1200   return true;
1201 }
1202 
1203 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1204   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1205     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1206     return true;
1207   }
1208   return false;
1209 }
1210 
1211 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1212   // If this is FrameIndex, select it directly. Otherwise just let it get
1213   // selected to a register independently.
1214   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1215     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1216   else
1217     Base = Addr;
1218   return true;
1219 }
1220 
1221 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1222                                         SDValue &ShAmt) {
1223   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1224   // amount. If there is an AND on the shift amount, we can bypass it if it
1225   // doesn't affect any of those bits.
1226   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1227     const APInt &AndMask = N->getConstantOperandAPInt(1);
1228 
1229     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1230     // mask that covers the bits needed to represent all shift amounts.
1231     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1232     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1233 
1234     if (ShMask.isSubsetOf(AndMask)) {
1235       ShAmt = N.getOperand(0);
1236       return true;
1237     }
1238 
1239     // SimplifyDemandedBits may have optimized the mask so try restoring any
1240     // bits that are known zero.
1241     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1242     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1243       ShAmt = N.getOperand(0);
1244       return true;
1245     }
1246   }
1247 
1248   ShAmt = N;
1249   return true;
1250 }
1251 
1252 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1253   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1254       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1255     Val = N.getOperand(0);
1256     return true;
1257   }
1258   // FIXME: Should we just call computeNumSignBits here?
1259   if (N.getOpcode() == ISD::AssertSext &&
1260       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1261     Val = N;
1262     return true;
1263   }
1264   if (N.getOpcode() == ISD::AssertZext &&
1265       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) {
1266     Val = N;
1267     return true;
1268   }
1269 
1270   return false;
1271 }
1272 
1273 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1274   if (N.getOpcode() == ISD::AND) {
1275     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1276     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1277       Val = N.getOperand(0);
1278       return true;
1279     }
1280   }
1281   // FIXME: Should we just call computeKnownBits here?
1282   if (N.getOpcode() == ISD::AssertZext &&
1283       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1284     Val = N;
1285     return true;
1286   }
1287 
1288   return false;
1289 }
1290 
1291 // Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
1292 // in which imm = imm0 + imm1 and both imm0 and imm1 are simm12.
1293 bool RISCVDAGToDAGISel::selectAddiPair(SDValue N, SDValue &Val) {
1294   if (auto *ConstOp = dyn_cast<ConstantSDNode>(N)) {
1295     // The immediate operand must have only use.
1296     if (!(ConstOp->hasOneUse()))
1297       return false;
1298     // The immediate operand must be in range [-4096,-2049] or [2048,4094].
1299     int64_t Imm = ConstOp->getSExtValue();
1300     if ((-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094)) {
1301       Val = N;
1302       return true;
1303     }
1304   }
1305   return false;
1306 }
1307 
1308 // Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
1309 // on RV64).
1310 // SLLIUW is the same as SLLI except for the fact that it clears the bits
1311 // XLEN-1:32 of the input RS1 before shifting.
1312 // A PatFrag has already checked that it has the right structure:
1313 //
1314 //  (AND (SHL RS1, VC2), VC1)
1315 //
1316 // We check that VC2, the shamt is less than 32, otherwise the pattern is
1317 // exactly the same as SLLI and we give priority to that.
1318 // Eventually we check that VC1, the mask used to clear the upper 32 bits
1319 // of RS1, is correct:
1320 //
1321 //  VC1 == (0xFFFFFFFF << VC2)
1322 //
1323 bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
1324   assert(N->getOpcode() == ISD::AND);
1325   assert(N->getOperand(0).getOpcode() == ISD::SHL);
1326   assert(isa<ConstantSDNode>(N->getOperand(1)));
1327   assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
1328 
1329   // The IsRV64 predicate is checked after PatFrag predicates so we can get
1330   // here even on RV32.
1331   if (!Subtarget->is64Bit())
1332     return false;
1333 
1334   SDValue Shl = N->getOperand(0);
1335   uint64_t VC1 = N->getConstantOperandVal(1);
1336   uint64_t VC2 = Shl.getConstantOperandVal(1);
1337 
1338   // Immediate range should be enforced by uimm5 predicate.
1339   assert(VC2 < 32 && "Unexpected immediate");
1340   return (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
1341 }
1342 
1343 // Select VL as a 5 bit immediate or a value that will become a register. This
1344 // allows us to choose betwen VSETIVLI or VSETVLI later.
1345 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1346   auto *C = dyn_cast<ConstantSDNode>(N);
1347   if (C && isUInt<5>(C->getZExtValue()))
1348     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1349                                    N->getValueType(0));
1350   else
1351     VL = N;
1352 
1353   return true;
1354 }
1355 
1356 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1357   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1358       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1359       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1360     return false;
1361   SplatVal = N.getOperand(0);
1362   return true;
1363 }
1364 
1365 using ValidateFn = bool (*)(int64_t);
1366 
1367 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1368                                    SelectionDAG &DAG,
1369                                    const RISCVSubtarget &Subtarget,
1370                                    ValidateFn ValidateImm) {
1371   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1372        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1373        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1374       !isa<ConstantSDNode>(N.getOperand(0)))
1375     return false;
1376 
1377   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1378 
1379   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1380   // share semantics when the operand type is wider than the resulting vector
1381   // element type: an implicit truncation first takes place. Therefore, perform
1382   // a manual truncation/sign-extension in order to ignore any truncated bits
1383   // and catch any zero-extended immediate.
1384   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1385   // sign-extending to (XLenVT -1).
1386   MVT XLenVT = Subtarget.getXLenVT();
1387   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1388          "Unexpected splat operand type");
1389   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1390   if (EltVT.bitsLT(XLenVT))
1391     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1392 
1393   if (!ValidateImm(SplatImm))
1394     return false;
1395 
1396   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1397   return true;
1398 }
1399 
1400 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1401   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1402                                 [](int64_t Imm) { return isInt<5>(Imm); });
1403 }
1404 
1405 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1406   return selectVSplatSimmHelper(
1407       N, SplatVal, *CurDAG, *Subtarget,
1408       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1409 }
1410 
1411 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1412                                                       SDValue &SplatVal) {
1413   return selectVSplatSimmHelper(
1414       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1415         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1416       });
1417 }
1418 
1419 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1420   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1421        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1422        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1423       !isa<ConstantSDNode>(N.getOperand(0)))
1424     return false;
1425 
1426   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1427 
1428   if (!isUInt<5>(SplatImm))
1429     return false;
1430 
1431   SplatVal =
1432       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1433 
1434   return true;
1435 }
1436 
1437 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1438                                        SDValue &Imm) {
1439   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1440     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1441 
1442     if (!isInt<5>(ImmVal))
1443       return false;
1444 
1445     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1446     return true;
1447   }
1448 
1449   return false;
1450 }
1451 
1452 bool RISCVDAGToDAGISel::selectRVVUimm5(SDValue N, unsigned Width,
1453                                        SDValue &Imm) {
1454   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1455     int64_t ImmVal = C->getSExtValue();
1456 
1457     if (!isUInt<5>(ImmVal))
1458       return false;
1459 
1460     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1461     return true;
1462   }
1463 
1464   return false;
1465 }
1466 
// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit immediate.
void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
  // Walk all nodes starting just past the root, iterating backwards through
  // the all-nodes list so each candidate load/store is visited after the
  // nodes that use it.
  SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
  ++Position;

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    // Operand indices of the base register and the immediate offset within
    // the machine node; these differ between loads and stores.
    int OffsetOpIdx;
    int BaseOpIdx;

    // Only attempt this optimisation for I-type loads and S-type stores.
    switch (N->getMachineOpcode()) {
    default:
      continue;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLH:
    case RISCV::FLW:
    case RISCV::FLD:
      // Loads: (base, offset, chain).
      BaseOpIdx = 0;
      OffsetOpIdx = 1;
      break;
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSH:
    case RISCV::FSW:
    case RISCV::FSD:
      // Stores: (value, base, offset, chain).
      BaseOpIdx = 1;
      OffsetOpIdx = 2;
      break;
    }

    // The offset operand must be an immediate for the fold to apply.
    if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
      continue;

    SDValue Base = N->getOperand(BaseOpIdx);

    // If the base is an ADDI, we can merge it in to the load/store.
    if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
      continue;

    SDValue ImmOperand = Base.getOperand(1);
    // NOTE: getConstantOperandVal zero-extends, so a negative offset appears
    // here as a large unsigned value; the alignment checks below then take
    // the conservative `continue` path for such offsets.
    uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

    // The ADDI's immediate can be a plain constant, a global address low
    // part, or a constant-pool address; handle each form of off1.
    if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
      int64_t Offset1 = Const->getSExtValue();
      int64_t CombinedOffset = Offset1 + Offset2;
      // The merged offset must still fit the instruction's simm12 field.
      if (!isInt<12>(CombinedOffset))
        continue;
      ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                             ImmOperand.getValueType());
    } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
      // If the off1 in (addi base, off1) is a global variable's address (its
      // low part, really), then we can rely on the alignment of that variable
      // to provide a margin of safety before off1 can overflow the 12 bits.
      // Check if off2 falls within that margin; if so off1+off2 can't overflow.
      const DataLayout &DL = CurDAG->getDataLayout();
      Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = GA->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetGlobalAddress(
          GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
          CombinedOffset, GA->getTargetFlags());
    } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
      // Ditto.
      Align Alignment = CP->getAlign();
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = CP->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetConstantPool(
          CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
          CombinedOffset, CP->getTargetFlags());
    } else {
      continue;
    }

    LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
    LLVM_DEBUG(Base->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\nN: ");
    LLVM_DEBUG(N->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");

    // Modify the offset operand of the load/store.
    if (BaseOpIdx == 0) // Load
      CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                                 N->getOperand(2));
    else // Store
      CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                                 ImmOperand, N->getOperand(3));

    // The add-immediate may now be dead, in which case remove it.
    if (Base.getNode()->use_empty())
      CurDAG->RemoveDeadNode(Base.getNode());
  }
}
1579 
1580 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1581 // for instruction scheduling.
1582 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
1583   return new RISCVDAGToDAGISel(TM);
1584 }
1585