1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #include "RISCVGenSearchableTables.inc"
41 } // namespace RISCV
42 } // namespace llvm
43 
// Expand custom RISCV nodes that instruction selection cannot handle
// directly. Currently this only rewrites
// RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL nodes.
void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    // Operands are the low half, the high half, and the vector length.
    assert(N->getNumOperands() == 3 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);
    SDValue VL = N->getOperand(2);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    // Store both 32-bit halves to the slot. Both stores are chained to the
    // entry node (they are independent) and joined by the TokenFactor below.
    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    // Reload the combined i64 via vlse with X0 as the stride operand — the
    // stride-0 load broadcasts the stored value to every element (see the
    // comment above).
    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain, IntID, StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64), VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created.  This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
109 
// Run target peepholes after instruction selection. The only peephole today
// is doPeepholeLoadStoreADDI (presumably folding an ADDI into load/store
// addressing — its definition is elsewhere in this file; confirm there).
void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}
113 
114 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
115                          MVT XLenVT) {
116   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64);
117 
118   SDNode *Result = nullptr;
119   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
120   for (RISCVMatInt::Inst &Inst : Seq) {
121     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
122     if (Inst.Opc == RISCV::LUI)
123       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
124     else
125       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
126 
127     // Only the first instruction has X0 as its source.
128     SrcReg = SDValue(Result, 0);
129   }
130 
131   return Result;
132 }
133 
134 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
135                                unsigned RegClassID, unsigned SubReg0) {
136   assert(Regs.size() >= 2 && Regs.size() <= 8);
137 
138   SDLoc DL(Regs[0]);
139   SmallVector<SDValue, 8> Ops;
140 
141   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
142 
143   for (unsigned I = 0; I < Regs.size(); ++I) {
144     Ops.push_back(Regs[I]);
145     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
146   }
147   SDNode *N =
148       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
149   return SDValue(N, 0);
150 }
151 
152 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
153                              unsigned NF) {
154   static const unsigned RegClassIDs[] = {
155       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
156       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
157       RISCV::VRN8M1RegClassID};
158 
159   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
160 }
161 
162 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
163                              unsigned NF) {
164   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
165                                          RISCV::VRN3M2RegClassID,
166                                          RISCV::VRN4M2RegClassID};
167 
168   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
169 }
170 
171 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
172                              unsigned NF) {
173   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
174                          RISCV::sub_vrm4_0);
175 }
176 
177 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
178                            unsigned NF, RISCVII::VLMUL LMUL) {
179   switch (LMUL) {
180   default:
181     llvm_unreachable("Invalid LMUL.");
182   case RISCVII::VLMUL::LMUL_F8:
183   case RISCVII::VLMUL::LMUL_F4:
184   case RISCVII::VLMUL::LMUL_F2:
185   case RISCVII::VLMUL::LMUL_1:
186     return createM1Tuple(CurDAG, Regs, NF);
187   case RISCVII::VLMUL::LMUL_2:
188     return createM2Tuple(CurDAG, Regs, NF);
189   case RISCVII::VLMUL::LMUL_4:
190     return createM4Tuple(CurDAG, Regs, NF);
191   }
192 }
193 
// Append the operand tail shared by the RVV load/store pseudos to Operands,
// in the order the pseudos expect: base address, optional stride/index,
// optional mask register (V0), VL, log2(SEW), the chain, and — when a mask
// copy was emitted — the glue tying that copy to the memory access.
// CurOp is the index on Node of the base-pointer operand; if IndexVT is
// non-null it receives the type of the stride/index operand.
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  // VL operand, via the custom VL selector.
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  // SEW is encoded as its log2.
  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
230 
// Select a (possibly masked, possibly strided) segment-load intrinsic into
// its VLSEG pseudo. The pseudo produces a single untyped tuple register,
// from which each of the NF segment results is extracted as a subregister.
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; real operands start
  // at index 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // Masked forms carry NF masked-off (pass-through) values first; pack
    // them into a single tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  // Look up the concrete pseudo for this NF/mask/stride/SEW/LMUL combo.
  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Carry over the memory operand so later passes keep the memory info.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF per-segment values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
271 
// Select a fault-only-first segment load. Like selectVLSEG, but the pseudo
// also produces glue feeding a PseudoReadVL that reads the VL actually
// loaded, which becomes the intrinsic's extra VL result.
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; real operands start
  // at index 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    // Pack the NF masked-off (pass-through) values into one tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  // Results: tuple register, chain, and glue for the ReadVL below.
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  // Carry over the memory operand so later passes keep the memory info.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF per-segment values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}
315 
// Select an indexed (ordered or unordered) segment-load intrinsic into its
// VLXSEG pseudo. The index vector's own EEW and LMUL are derived from its
// type and participate in the pseudo lookup.
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; real operands start
  // at index 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // Pack the NF masked-off (pass-through) values into one tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  // IndexVT receives the type of the index operand consumed here.
  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Carry over the memory operand so later passes keep the memory info.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF per-segment values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
362 
// Select a (possibly masked, possibly strided) segment-store intrinsic into
// its VSSEG pseudo. The NF values to store are packed into one tuple
// register operand.
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Operands are: chain, intrinsic ID, NF store values, base, [stride],
  // [mask], VL — so NF is the operand count minus the four fixed operands,
  // minus one each for stride and mask when present.
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  // Pack the NF store values (starting at operand 2) into one tuple.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Carry over the memory operand so later passes keep the memory info.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
394 
// Select an indexed (ordered or unordered) segment-store intrinsic into its
// VSXSEG pseudo. The index vector's own EEW and LMUL participate in the
// pseudo lookup, like in selectVLXSEG.
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Operands are: chain, intrinsic ID, NF store values, base, index,
  // [mask], VL — five fixed operands plus an optional mask.
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  // Pack the NF store values (starting at operand 2) into one tuple.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  // IndexVT receives the type of the index operand consumed here.
  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Carry over the memory operand so later passes keep the memory info.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
431 
432 
433 void RISCVDAGToDAGISel::Select(SDNode *Node) {
434   // If we have a custom node, we have already selected.
435   if (Node->isMachineOpcode()) {
436     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
437     Node->setNodeId(-1);
438     return;
439   }
440 
441   // Instruction Selection not handled by the auto-generated tablegen selection
442   // should be handled here.
443   unsigned Opcode = Node->getOpcode();
444   MVT XLenVT = Subtarget->getXLenVT();
445   SDLoc DL(Node);
446   MVT VT = Node->getSimpleValueType(0);
447 
448   switch (Opcode) {
449   case ISD::Constant: {
450     auto *ConstNode = cast<ConstantSDNode>(Node);
451     if (VT == XLenVT && ConstNode->isNullValue()) {
452       SDValue New =
453           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
454       ReplaceNode(Node, New.getNode());
455       return;
456     }
457     ReplaceNode(Node, selectImm(CurDAG, DL, ConstNode->getSExtValue(), XLenVT));
458     return;
459   }
460   case ISD::FrameIndex: {
461     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
462     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
463     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
464     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
465     return;
466   }
467   case ISD::SRL: {
468     // We don't need this transform if zext.h is supported.
469     if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
470       break;
471     // Optimize (srl (and X, 0xffff), C) ->
472     //          (srli (slli X, (XLen-16), (XLen-16) + C)
473     // Taking into account that the 0xffff may have had lower bits unset by
474     // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
475     // This pattern occurs when type legalizing i16 right shifts.
476     // FIXME: This could be extended to other AND masks.
477     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
478     if (N1C) {
479       uint64_t ShAmt = N1C->getZExtValue();
480       SDValue N0 = Node->getOperand(0);
481       if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
482           isa<ConstantSDNode>(N0.getOperand(1))) {
483         uint64_t Mask = N0.getConstantOperandVal(1);
484         Mask |= maskTrailingOnes<uint64_t>(ShAmt);
485         if (Mask == 0xffff) {
486           unsigned LShAmt = Subtarget->getXLen() - 16;
487           SDNode *SLLI =
488               CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
489                                      CurDAG->getTargetConstant(LShAmt, DL, VT));
490           SDNode *SRLI = CurDAG->getMachineNode(
491               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
492               CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
493           ReplaceNode(Node, SRLI);
494           return;
495         }
496       }
497     }
498 
499     break;
500   }
501   case ISD::INTRINSIC_WO_CHAIN: {
502     unsigned IntNo = Node->getConstantOperandVal(0);
503     switch (IntNo) {
504       // By default we do not custom select any intrinsic.
505     default:
506       break;
507     case Intrinsic::riscv_vmsgeu:
508     case Intrinsic::riscv_vmsge: {
509       SDValue Src1 = Node->getOperand(1);
510       SDValue Src2 = Node->getOperand(2);
511       // Only custom select scalar second operand.
512       if (Src2.getValueType() != XLenVT)
513         break;
514       // Small constants are handled with patterns.
515       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
516         int64_t CVal = C->getSExtValue();
517         if (CVal >= -15 && CVal <= 16)
518           break;
519       }
520       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
521       MVT Src1VT = Src1.getSimpleValueType();
522       unsigned VMSLTOpcode, VMNANDOpcode;
523       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
524       default:
525         llvm_unreachable("Unexpected LMUL!");
526       case RISCVII::VLMUL::LMUL_F8:
527         VMSLTOpcode =
528             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
529         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
530         break;
531       case RISCVII::VLMUL::LMUL_F4:
532         VMSLTOpcode =
533             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
534         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
535         break;
536       case RISCVII::VLMUL::LMUL_F2:
537         VMSLTOpcode =
538             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
539         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
540         break;
541       case RISCVII::VLMUL::LMUL_1:
542         VMSLTOpcode =
543             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
544         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
545         break;
546       case RISCVII::VLMUL::LMUL_2:
547         VMSLTOpcode =
548             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
549         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
550         break;
551       case RISCVII::VLMUL::LMUL_4:
552         VMSLTOpcode =
553             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
554         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
555         break;
556       case RISCVII::VLMUL::LMUL_8:
557         VMSLTOpcode =
558             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
559         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
560         break;
561       }
562       SDValue SEW = CurDAG->getTargetConstant(
563           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
564       SDValue VL;
565       selectVLOp(Node->getOperand(3), VL);
566 
567       // Expand to
568       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
569       SDValue Cmp = SDValue(
570           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
571           0);
572       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
573                                                {Cmp, Cmp, VL, SEW}));
574       return;
575     }
576     case Intrinsic::riscv_vmsgeu_mask:
577     case Intrinsic::riscv_vmsge_mask: {
578       SDValue Src1 = Node->getOperand(2);
579       SDValue Src2 = Node->getOperand(3);
580       // Only custom select scalar second operand.
581       if (Src2.getValueType() != XLenVT)
582         break;
583       // Small constants are handled with patterns.
584       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
585         int64_t CVal = C->getSExtValue();
586         if (CVal >= -15 && CVal <= 16)
587           break;
588       }
589       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
590       MVT Src1VT = Src1.getSimpleValueType();
591       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
592       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
593       default:
594         llvm_unreachable("Unexpected LMUL!");
595       case RISCVII::VLMUL::LMUL_F8:
596         VMSLTOpcode =
597             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
598         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
599                                      : RISCV::PseudoVMSLT_VX_MF8_MASK;
600         break;
601       case RISCVII::VLMUL::LMUL_F4:
602         VMSLTOpcode =
603             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
604         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
605                                      : RISCV::PseudoVMSLT_VX_MF4_MASK;
606         break;
607       case RISCVII::VLMUL::LMUL_F2:
608         VMSLTOpcode =
609             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
610         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
611                                      : RISCV::PseudoVMSLT_VX_MF2_MASK;
612         break;
613       case RISCVII::VLMUL::LMUL_1:
614         VMSLTOpcode =
615             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
616         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
617                                      : RISCV::PseudoVMSLT_VX_M1_MASK;
618         break;
619       case RISCVII::VLMUL::LMUL_2:
620         VMSLTOpcode =
621             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
622         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
623                                      : RISCV::PseudoVMSLT_VX_M2_MASK;
624         break;
625       case RISCVII::VLMUL::LMUL_4:
626         VMSLTOpcode =
627             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
628         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
629                                      : RISCV::PseudoVMSLT_VX_M4_MASK;
630         break;
631       case RISCVII::VLMUL::LMUL_8:
632         VMSLTOpcode =
633             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
634         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
635                                      : RISCV::PseudoVMSLT_VX_M8_MASK;
636         break;
637       }
638       // Mask operations use the LMUL from the mask type.
639       switch (RISCVTargetLowering::getLMUL(VT)) {
640       default:
641         llvm_unreachable("Unexpected LMUL!");
642       case RISCVII::VLMUL::LMUL_F8:
643         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
644         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
645         break;
646       case RISCVII::VLMUL::LMUL_F4:
647         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
648         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
649         break;
650       case RISCVII::VLMUL::LMUL_F2:
651         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
652         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
653         break;
654       case RISCVII::VLMUL::LMUL_1:
655         VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
656         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
657         break;
658       case RISCVII::VLMUL::LMUL_2:
659         VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
660         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
661         break;
662       case RISCVII::VLMUL::LMUL_4:
663         VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
664         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
665         break;
666       case RISCVII::VLMUL::LMUL_8:
667         VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
668         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
669         break;
670       }
671       SDValue SEW = CurDAG->getTargetConstant(
672           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
673       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
674       SDValue VL;
675       selectVLOp(Node->getOperand(5), VL);
676       SDValue MaskedOff = Node->getOperand(1);
677       SDValue Mask = Node->getOperand(4);
678       // If the MaskedOff value and the Mask are the same value use
679       // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
680       // This avoids needing to copy v0 to vd before starting the next sequence.
681       if (Mask == MaskedOff) {
682         SDValue Cmp = SDValue(
683             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
684             0);
685         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
686                                                  {Mask, Cmp, VL, MaskSEW}));
687         return;
688       }
689 
690       // Otherwise use
691       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
692       SDValue Cmp = SDValue(
693           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
694                                  {MaskedOff, Src1, Src2, Mask, VL, SEW}),
695           0);
696       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
697                                                {Cmp, Mask, VL, MaskSEW}));
698       return;
699     }
700     }
701     break;
702   }
703   case ISD::INTRINSIC_W_CHAIN: {
704     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
705     switch (IntNo) {
706       // By default we do not custom select any intrinsic.
707     default:
708       break;
709 
710     case Intrinsic::riscv_vsetvli:
711     case Intrinsic::riscv_vsetvlimax: {
712       if (!Subtarget->hasStdExtV())
713         break;
714 
715       bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
716       unsigned Offset = VLMax ? 2 : 3;
717 
718       assert(Node->getNumOperands() == Offset + 2 &&
719              "Unexpected number of operands");
720 
721       unsigned SEW =
722           RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
723       RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
724           Node->getConstantOperandVal(Offset + 1) & 0x7);
725 
726       unsigned VTypeI = RISCVVType::encodeVTYPE(
727           VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
728       SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
729 
730       SDValue VLOperand;
731       if (VLMax) {
732         VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
733       } else {
734         VLOperand = Node->getOperand(2);
735 
736         if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
737           uint64_t AVL = C->getZExtValue();
738           if (isUInt<5>(AVL)) {
739             SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
740             ReplaceNode(
741                 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
742                                              MVT::Other, VLImm, VTypeIOp,
743                                              /* Chain */ Node->getOperand(0)));
744             return;
745           }
746         }
747       }
748 
749       ReplaceNode(Node,
750                   CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
751                                          MVT::Other, VLOperand, VTypeIOp,
752                                          /* Chain */ Node->getOperand(0)));
753       return;
754     }
755     case Intrinsic::riscv_vlseg2:
756     case Intrinsic::riscv_vlseg3:
757     case Intrinsic::riscv_vlseg4:
758     case Intrinsic::riscv_vlseg5:
759     case Intrinsic::riscv_vlseg6:
760     case Intrinsic::riscv_vlseg7:
761     case Intrinsic::riscv_vlseg8: {
762       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
763       return;
764     }
765     case Intrinsic::riscv_vlseg2_mask:
766     case Intrinsic::riscv_vlseg3_mask:
767     case Intrinsic::riscv_vlseg4_mask:
768     case Intrinsic::riscv_vlseg5_mask:
769     case Intrinsic::riscv_vlseg6_mask:
770     case Intrinsic::riscv_vlseg7_mask:
771     case Intrinsic::riscv_vlseg8_mask: {
772       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
773       return;
774     }
775     case Intrinsic::riscv_vlsseg2:
776     case Intrinsic::riscv_vlsseg3:
777     case Intrinsic::riscv_vlsseg4:
778     case Intrinsic::riscv_vlsseg5:
779     case Intrinsic::riscv_vlsseg6:
780     case Intrinsic::riscv_vlsseg7:
781     case Intrinsic::riscv_vlsseg8: {
782       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
783       return;
784     }
785     case Intrinsic::riscv_vlsseg2_mask:
786     case Intrinsic::riscv_vlsseg3_mask:
787     case Intrinsic::riscv_vlsseg4_mask:
788     case Intrinsic::riscv_vlsseg5_mask:
789     case Intrinsic::riscv_vlsseg6_mask:
790     case Intrinsic::riscv_vlsseg7_mask:
791     case Intrinsic::riscv_vlsseg8_mask: {
792       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
793       return;
794     }
795     case Intrinsic::riscv_vloxseg2:
796     case Intrinsic::riscv_vloxseg3:
797     case Intrinsic::riscv_vloxseg4:
798     case Intrinsic::riscv_vloxseg5:
799     case Intrinsic::riscv_vloxseg6:
800     case Intrinsic::riscv_vloxseg7:
801     case Intrinsic::riscv_vloxseg8:
802       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
803       return;
804     case Intrinsic::riscv_vluxseg2:
805     case Intrinsic::riscv_vluxseg3:
806     case Intrinsic::riscv_vluxseg4:
807     case Intrinsic::riscv_vluxseg5:
808     case Intrinsic::riscv_vluxseg6:
809     case Intrinsic::riscv_vluxseg7:
810     case Intrinsic::riscv_vluxseg8:
811       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
812       return;
813     case Intrinsic::riscv_vloxseg2_mask:
814     case Intrinsic::riscv_vloxseg3_mask:
815     case Intrinsic::riscv_vloxseg4_mask:
816     case Intrinsic::riscv_vloxseg5_mask:
817     case Intrinsic::riscv_vloxseg6_mask:
818     case Intrinsic::riscv_vloxseg7_mask:
819     case Intrinsic::riscv_vloxseg8_mask:
820       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
821       return;
822     case Intrinsic::riscv_vluxseg2_mask:
823     case Intrinsic::riscv_vluxseg3_mask:
824     case Intrinsic::riscv_vluxseg4_mask:
825     case Intrinsic::riscv_vluxseg5_mask:
826     case Intrinsic::riscv_vluxseg6_mask:
827     case Intrinsic::riscv_vluxseg7_mask:
828     case Intrinsic::riscv_vluxseg8_mask:
829       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
830       return;
831     case Intrinsic::riscv_vlseg8ff:
832     case Intrinsic::riscv_vlseg7ff:
833     case Intrinsic::riscv_vlseg6ff:
834     case Intrinsic::riscv_vlseg5ff:
835     case Intrinsic::riscv_vlseg4ff:
836     case Intrinsic::riscv_vlseg3ff:
837     case Intrinsic::riscv_vlseg2ff: {
838       selectVLSEGFF(Node, /*IsMasked*/ false);
839       return;
840     }
841     case Intrinsic::riscv_vlseg8ff_mask:
842     case Intrinsic::riscv_vlseg7ff_mask:
843     case Intrinsic::riscv_vlseg6ff_mask:
844     case Intrinsic::riscv_vlseg5ff_mask:
845     case Intrinsic::riscv_vlseg4ff_mask:
846     case Intrinsic::riscv_vlseg3ff_mask:
847     case Intrinsic::riscv_vlseg2ff_mask: {
848       selectVLSEGFF(Node, /*IsMasked*/ true);
849       return;
850     }
851     case Intrinsic::riscv_vloxei:
852     case Intrinsic::riscv_vloxei_mask:
853     case Intrinsic::riscv_vluxei:
854     case Intrinsic::riscv_vluxei_mask: {
855       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
856                       IntNo == Intrinsic::riscv_vluxei_mask;
857       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
858                        IntNo == Intrinsic::riscv_vloxei_mask;
859 
860       MVT VT = Node->getSimpleValueType(0);
861       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
862 
863       unsigned CurOp = 2;
864       SmallVector<SDValue, 8> Operands;
865       if (IsMasked)
866         Operands.push_back(Node->getOperand(CurOp++));
867 
868       MVT IndexVT;
869       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
870                                  /*IsStridedOrIndexed*/ true, Operands,
871                                  &IndexVT);
872 
873       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
874              "Element count mismatch");
875 
876       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
877       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
878       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
879       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
880           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
881           static_cast<unsigned>(IndexLMUL));
882       MachineSDNode *Load =
883           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
884 
885       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
886         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
887 
888       ReplaceNode(Node, Load);
889       return;
890     }
891     case Intrinsic::riscv_vle1:
892     case Intrinsic::riscv_vle:
893     case Intrinsic::riscv_vle_mask:
894     case Intrinsic::riscv_vlse:
895     case Intrinsic::riscv_vlse_mask: {
896       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
897                       IntNo == Intrinsic::riscv_vlse_mask;
898       bool IsStrided =
899           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
900 
901       MVT VT = Node->getSimpleValueType(0);
902       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
903 
904       unsigned CurOp = 2;
905       SmallVector<SDValue, 8> Operands;
906       if (IsMasked)
907         Operands.push_back(Node->getOperand(CurOp++));
908 
909       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
910                                  Operands);
911 
912       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
913       const RISCV::VLEPseudo *P =
914           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
915                               static_cast<unsigned>(LMUL));
916       MachineSDNode *Load =
917           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
918 
919       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
920         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
921 
922       ReplaceNode(Node, Load);
923       return;
924     }
925     case Intrinsic::riscv_vleff:
926     case Intrinsic::riscv_vleff_mask: {
927       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
928 
929       MVT VT = Node->getSimpleValueType(0);
930       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
931 
932       unsigned CurOp = 2;
933       SmallVector<SDValue, 7> Operands;
934       if (IsMasked)
935         Operands.push_back(Node->getOperand(CurOp++));
936 
937       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
938                                  /*IsStridedOrIndexed*/ false, Operands);
939 
940       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
941       const RISCV::VLEPseudo *P =
942           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
943                               static_cast<unsigned>(LMUL));
944       MachineSDNode *Load =
945           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
946                                  MVT::Other, MVT::Glue, Operands);
947       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
948                                               /*Glue*/ SDValue(Load, 2));
949 
950       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
951         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
952 
953       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
954       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
955       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
956       CurDAG->RemoveDeadNode(Node);
957       return;
958     }
959     }
960     break;
961   }
962   case ISD::INTRINSIC_VOID: {
963     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
964     switch (IntNo) {
965     case Intrinsic::riscv_vsseg2:
966     case Intrinsic::riscv_vsseg3:
967     case Intrinsic::riscv_vsseg4:
968     case Intrinsic::riscv_vsseg5:
969     case Intrinsic::riscv_vsseg6:
970     case Intrinsic::riscv_vsseg7:
971     case Intrinsic::riscv_vsseg8: {
972       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
973       return;
974     }
975     case Intrinsic::riscv_vsseg2_mask:
976     case Intrinsic::riscv_vsseg3_mask:
977     case Intrinsic::riscv_vsseg4_mask:
978     case Intrinsic::riscv_vsseg5_mask:
979     case Intrinsic::riscv_vsseg6_mask:
980     case Intrinsic::riscv_vsseg7_mask:
981     case Intrinsic::riscv_vsseg8_mask: {
982       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
983       return;
984     }
985     case Intrinsic::riscv_vssseg2:
986     case Intrinsic::riscv_vssseg3:
987     case Intrinsic::riscv_vssseg4:
988     case Intrinsic::riscv_vssseg5:
989     case Intrinsic::riscv_vssseg6:
990     case Intrinsic::riscv_vssseg7:
991     case Intrinsic::riscv_vssseg8: {
992       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
993       return;
994     }
995     case Intrinsic::riscv_vssseg2_mask:
996     case Intrinsic::riscv_vssseg3_mask:
997     case Intrinsic::riscv_vssseg4_mask:
998     case Intrinsic::riscv_vssseg5_mask:
999     case Intrinsic::riscv_vssseg6_mask:
1000     case Intrinsic::riscv_vssseg7_mask:
1001     case Intrinsic::riscv_vssseg8_mask: {
1002       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1003       return;
1004     }
1005     case Intrinsic::riscv_vsoxseg2:
1006     case Intrinsic::riscv_vsoxseg3:
1007     case Intrinsic::riscv_vsoxseg4:
1008     case Intrinsic::riscv_vsoxseg5:
1009     case Intrinsic::riscv_vsoxseg6:
1010     case Intrinsic::riscv_vsoxseg7:
1011     case Intrinsic::riscv_vsoxseg8:
1012       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1013       return;
1014     case Intrinsic::riscv_vsuxseg2:
1015     case Intrinsic::riscv_vsuxseg3:
1016     case Intrinsic::riscv_vsuxseg4:
1017     case Intrinsic::riscv_vsuxseg5:
1018     case Intrinsic::riscv_vsuxseg6:
1019     case Intrinsic::riscv_vsuxseg7:
1020     case Intrinsic::riscv_vsuxseg8:
1021       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1022       return;
1023     case Intrinsic::riscv_vsoxseg2_mask:
1024     case Intrinsic::riscv_vsoxseg3_mask:
1025     case Intrinsic::riscv_vsoxseg4_mask:
1026     case Intrinsic::riscv_vsoxseg5_mask:
1027     case Intrinsic::riscv_vsoxseg6_mask:
1028     case Intrinsic::riscv_vsoxseg7_mask:
1029     case Intrinsic::riscv_vsoxseg8_mask:
1030       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1031       return;
1032     case Intrinsic::riscv_vsuxseg2_mask:
1033     case Intrinsic::riscv_vsuxseg3_mask:
1034     case Intrinsic::riscv_vsuxseg4_mask:
1035     case Intrinsic::riscv_vsuxseg5_mask:
1036     case Intrinsic::riscv_vsuxseg6_mask:
1037     case Intrinsic::riscv_vsuxseg7_mask:
1038     case Intrinsic::riscv_vsuxseg8_mask:
1039       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1040       return;
1041     case Intrinsic::riscv_vsoxei:
1042     case Intrinsic::riscv_vsoxei_mask:
1043     case Intrinsic::riscv_vsuxei:
1044     case Intrinsic::riscv_vsuxei_mask: {
1045       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1046                       IntNo == Intrinsic::riscv_vsuxei_mask;
1047       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1048                        IntNo == Intrinsic::riscv_vsoxei_mask;
1049 
1050       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1051       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1052 
1053       unsigned CurOp = 2;
1054       SmallVector<SDValue, 8> Operands;
1055       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1056 
1057       MVT IndexVT;
1058       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1059                                  /*IsStridedOrIndexed*/ true, Operands,
1060                                  &IndexVT);
1061 
1062       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1063              "Element count mismatch");
1064 
1065       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1066       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1067       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1068       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1069           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1070           static_cast<unsigned>(IndexLMUL));
1071       MachineSDNode *Store =
1072           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1073 
1074       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1075         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1076 
1077       ReplaceNode(Node, Store);
1078       return;
1079     }
1080     case Intrinsic::riscv_vse1:
1081     case Intrinsic::riscv_vse:
1082     case Intrinsic::riscv_vse_mask:
1083     case Intrinsic::riscv_vsse:
1084     case Intrinsic::riscv_vsse_mask: {
1085       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1086                       IntNo == Intrinsic::riscv_vsse_mask;
1087       bool IsStrided =
1088           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1089 
1090       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1091       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1092 
1093       unsigned CurOp = 2;
1094       SmallVector<SDValue, 8> Operands;
1095       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1096 
1097       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1098                                  Operands);
1099 
1100       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1101       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1102           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1103       MachineSDNode *Store =
1104           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1105       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1106         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1107 
1108       ReplaceNode(Node, Store);
1109       return;
1110     }
1111     }
1112     break;
1113   }
1114   case ISD::BITCAST: {
1115     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1116     // Just drop bitcasts between vectors if both are fixed or both are
1117     // scalable.
1118     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1119         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1120       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1121       CurDAG->RemoveDeadNode(Node);
1122       return;
1123     }
1124     break;
1125   }
1126   case ISD::INSERT_SUBVECTOR: {
1127     SDValue V = Node->getOperand(0);
1128     SDValue SubV = Node->getOperand(1);
1129     SDLoc DL(SubV);
1130     auto Idx = Node->getConstantOperandVal(2);
1131     MVT SubVecVT = SubV.getSimpleValueType();
1132 
1133     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1134     MVT SubVecContainerVT = SubVecVT;
1135     // Establish the correct scalable-vector types for any fixed-length type.
1136     if (SubVecVT.isFixedLengthVector())
1137       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1138     if (VT.isFixedLengthVector())
1139       VT = TLI.getContainerForFixedLengthVector(VT);
1140 
1141     const auto *TRI = Subtarget->getRegisterInfo();
1142     unsigned SubRegIdx;
1143     std::tie(SubRegIdx, Idx) =
1144         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1145             VT, SubVecContainerVT, Idx, TRI);
1146 
1147     // If the Idx hasn't been completely eliminated then this is a subvector
1148     // insert which doesn't naturally align to a vector register. These must
1149     // be handled using instructions to manipulate the vector registers.
1150     if (Idx != 0)
1151       break;
1152 
1153     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1154     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1155                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1156                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1157     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1158     assert((!IsSubVecPartReg || V.isUndef()) &&
1159            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1160            "the subvector is smaller than a full-sized register");
1161 
1162     // If we haven't set a SubRegIdx, then we must be going between
1163     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1164     if (SubRegIdx == RISCV::NoSubRegister) {
1165       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1166       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1167                  InRegClassID &&
1168              "Unexpected subvector extraction");
1169       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1170       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1171                                                DL, VT, SubV, RC);
1172       ReplaceNode(Node, NewNode);
1173       return;
1174     }
1175 
1176     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1177     ReplaceNode(Node, Insert.getNode());
1178     return;
1179   }
1180   case ISD::EXTRACT_SUBVECTOR: {
1181     SDValue V = Node->getOperand(0);
1182     auto Idx = Node->getConstantOperandVal(1);
1183     MVT InVT = V.getSimpleValueType();
1184     SDLoc DL(V);
1185 
1186     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1187     MVT SubVecContainerVT = VT;
1188     // Establish the correct scalable-vector types for any fixed-length type.
1189     if (VT.isFixedLengthVector())
1190       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1191     if (InVT.isFixedLengthVector())
1192       InVT = TLI.getContainerForFixedLengthVector(InVT);
1193 
1194     const auto *TRI = Subtarget->getRegisterInfo();
1195     unsigned SubRegIdx;
1196     std::tie(SubRegIdx, Idx) =
1197         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1198             InVT, SubVecContainerVT, Idx, TRI);
1199 
1200     // If the Idx hasn't been completely eliminated then this is a subvector
1201     // extract which doesn't naturally align to a vector register. These must
1202     // be handled using instructions to manipulate the vector registers.
1203     if (Idx != 0)
1204       break;
1205 
1206     // If we haven't set a SubRegIdx, then we must be going between
1207     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1208     if (SubRegIdx == RISCV::NoSubRegister) {
1209       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1210       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1211                  InRegClassID &&
1212              "Unexpected subvector extraction");
1213       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1214       SDNode *NewNode =
1215           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1216       ReplaceNode(Node, NewNode);
1217       return;
1218     }
1219 
1220     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1221     ReplaceNode(Node, Extract.getNode());
1222     return;
1223   }
1224   case RISCVISD::VMV_V_X_VL:
1225   case RISCVISD::VFMV_V_F_VL: {
1226     // Try to match splat of a scalar load to a strided load with stride of x0.
1227     SDValue Src = Node->getOperand(0);
1228     auto *Ld = dyn_cast<LoadSDNode>(Src);
1229     if (!Ld)
1230       break;
1231     EVT MemVT = Ld->getMemoryVT();
1232     // The memory VT should be the same size as the element type.
1233     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1234       break;
1235     if (!IsProfitableToFold(Src, Node, Node) ||
1236         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1237       break;
1238 
1239     SDValue VL;
1240     selectVLOp(Node->getOperand(1), VL);
1241 
1242     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1243     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1244 
1245     SDValue Operands[] = {Ld->getBasePtr(),
1246                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1247                           Ld->getChain()};
1248 
1249     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1250     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1251         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1252         static_cast<unsigned>(LMUL));
1253     MachineSDNode *Load =
1254         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1255 
1256     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1257       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1258 
1259     ReplaceNode(Node, Load);
1260     return;
1261   }
1262   }
1263 
1264   // Select the default instruction.
1265   SelectCode(Node);
1266 }
1267 
1268 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1269     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1270   switch (ConstraintID) {
1271   case InlineAsm::Constraint_m:
1272     // We just support simple memory operands that have a single address
1273     // operand and need no special handling.
1274     OutOps.push_back(Op);
1275     return false;
1276   case InlineAsm::Constraint_A:
1277     OutOps.push_back(Op);
1278     return false;
1279   default:
1280     break;
1281   }
1282 
1283   return true;
1284 }
1285 
1286 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1287   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1288     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1289     return true;
1290   }
1291   return false;
1292 }
1293 
1294 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1295   // If this is FrameIndex, select it directly. Otherwise just let it get
1296   // selected to a register independently.
1297   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1298     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1299   else
1300     Base = Addr;
1301   return true;
1302 }
1303 
1304 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1305                                         SDValue &ShAmt) {
1306   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1307   // amount. If there is an AND on the shift amount, we can bypass it if it
1308   // doesn't affect any of those bits.
1309   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1310     const APInt &AndMask = N->getConstantOperandAPInt(1);
1311 
1312     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1313     // mask that covers the bits needed to represent all shift amounts.
1314     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1315     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1316 
1317     if (ShMask.isSubsetOf(AndMask)) {
1318       ShAmt = N.getOperand(0);
1319       return true;
1320     }
1321 
1322     // SimplifyDemandedBits may have optimized the mask so try restoring any
1323     // bits that are known zero.
1324     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1325     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1326       ShAmt = N.getOperand(0);
1327       return true;
1328     }
1329   }
1330 
1331   ShAmt = N;
1332   return true;
1333 }
1334 
1335 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1336   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1337       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1338     Val = N.getOperand(0);
1339     return true;
1340   }
1341   MVT VT = N.getSimpleValueType();
1342   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1343     Val = N;
1344     return true;
1345   }
1346 
1347   return false;
1348 }
1349 
1350 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1351   if (N.getOpcode() == ISD::AND) {
1352     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1353     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1354       Val = N.getOperand(0);
1355       return true;
1356     }
1357   }
1358   MVT VT = N.getSimpleValueType();
1359   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1360   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1361     Val = N;
1362     return true;
1363   }
1364 
1365   return false;
1366 }
1367 
1368 // Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
1369 // on RV64).
1370 // SLLIUW is the same as SLLI except for the fact that it clears the bits
1371 // XLEN-1:32 of the input RS1 before shifting.
1372 // A PatFrag has already checked that it has the right structure:
1373 //
1374 //  (AND (SHL RS1, VC2), VC1)
1375 //
1376 // We check that VC2, the shamt is less than 32, otherwise the pattern is
1377 // exactly the same as SLLI and we give priority to that.
1378 // Eventually we check that VC1, the mask used to clear the upper 32 bits
1379 // of RS1, is correct:
1380 //
1381 //  VC1 == (0xFFFFFFFF << VC2)
1382 //
1383 bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
1384   assert(N->getOpcode() == ISD::AND);
1385   assert(N->getOperand(0).getOpcode() == ISD::SHL);
1386   assert(isa<ConstantSDNode>(N->getOperand(1)));
1387   assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
1388 
1389   // The IsRV64 predicate is checked after PatFrag predicates so we can get
1390   // here even on RV32.
1391   if (!Subtarget->is64Bit())
1392     return false;
1393 
1394   SDValue Shl = N->getOperand(0);
1395   uint64_t VC1 = N->getConstantOperandVal(1);
1396   uint64_t VC2 = Shl.getConstantOperandVal(1);
1397 
1398   // Immediate range should be enforced by uimm5 predicate.
1399   assert(VC2 < 32 && "Unexpected immediate");
1400   return (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
1401 }
1402 
1403 // Select VL as a 5 bit immediate or a value that will become a register. This
1404 // allows us to choose betwen VSETIVLI or VSETVLI later.
1405 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1406   auto *C = dyn_cast<ConstantSDNode>(N);
1407   if (C && isUInt<5>(C->getZExtValue()))
1408     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1409                                    N->getValueType(0));
1410   else
1411     VL = N;
1412 
1413   return true;
1414 }
1415 
1416 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1417   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1418       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1419       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1420     return false;
1421   SplatVal = N.getOperand(0);
1422   return true;
1423 }
1424 
1425 using ValidateFn = bool (*)(int64_t);
1426 
1427 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1428                                    SelectionDAG &DAG,
1429                                    const RISCVSubtarget &Subtarget,
1430                                    ValidateFn ValidateImm) {
1431   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1432        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1433        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1434       !isa<ConstantSDNode>(N.getOperand(0)))
1435     return false;
1436 
1437   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1438 
1439   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1440   // share semantics when the operand type is wider than the resulting vector
1441   // element type: an implicit truncation first takes place. Therefore, perform
1442   // a manual truncation/sign-extension in order to ignore any truncated bits
1443   // and catch any zero-extended immediate.
1444   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1445   // sign-extending to (XLenVT -1).
1446   MVT XLenVT = Subtarget.getXLenVT();
1447   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1448          "Unexpected splat operand type");
1449   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1450   if (EltVT.bitsLT(XLenVT))
1451     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1452 
1453   if (!ValidateImm(SplatImm))
1454     return false;
1455 
1456   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1457   return true;
1458 }
1459 
1460 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1461   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1462                                 [](int64_t Imm) { return isInt<5>(Imm); });
1463 }
1464 
1465 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1466   return selectVSplatSimmHelper(
1467       N, SplatVal, *CurDAG, *Subtarget,
1468       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1469 }
1470 
1471 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1472                                                       SDValue &SplatVal) {
1473   return selectVSplatSimmHelper(
1474       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1475         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1476       });
1477 }
1478 
1479 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1480   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1481        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1482        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1483       !isa<ConstantSDNode>(N.getOperand(0)))
1484     return false;
1485 
1486   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1487 
1488   if (!isUInt<5>(SplatImm))
1489     return false;
1490 
1491   SplatVal =
1492       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1493 
1494   return true;
1495 }
1496 
1497 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1498                                        SDValue &Imm) {
1499   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1500     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1501 
1502     if (!isInt<5>(ImmVal))
1503       return false;
1504 
1505     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1506     return true;
1507   }
1508 
1509   return false;
1510 }
1511 
// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit immediate.
void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
  // Walk all nodes in reverse, starting just past the root, so that users are
  // visited before the nodes they depend on.
  SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
  ++Position;

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    int OffsetOpIdx;
    int BaseOpIdx;

    // Only attempt this optimisation for I-type loads and S-type stores.
    switch (N->getMachineOpcode()) {
    default:
      continue;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLH:
    case RISCV::FLW:
    case RISCV::FLD:
      // Loads: operands are (base, offset, chain).
      BaseOpIdx = 0;
      OffsetOpIdx = 1;
      break;
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSH:
    case RISCV::FSW:
    case RISCV::FSD:
      // Stores: operands are (value, base, offset, chain).
      BaseOpIdx = 1;
      OffsetOpIdx = 2;
      break;
    }

    // The memory op's own offset (off2) must be an immediate constant.
    if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
      continue;

    SDValue Base = N->getOperand(BaseOpIdx);

    // If the base is an ADDI, we can merge it in to the load/store.
    if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
      continue;

    SDValue ImmOperand = Base.getOperand(1);
    uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

    // The ADDI's immediate (off1) may be a plain constant, a global address
    // low part, or a constant-pool low part; each case folds differently.
    if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
      int64_t Offset1 = Const->getSExtValue();
      int64_t CombinedOffset = Offset1 + Offset2;
      // The folded sum must still fit the 12-bit signed immediate field.
      if (!isInt<12>(CombinedOffset))
        continue;
      ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                             ImmOperand.getValueType());
    } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
      // If the off1 in (addi base, off1) is a global variable's address (its
      // low part, really), then we can rely on the alignment of that variable
      // to provide a margin of safety before off1 can overflow the 12 bits.
      // Check if off2 falls within that margin; if so off1+off2 can't overflow.
      const DataLayout &DL = CurDAG->getDataLayout();
      Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = GA->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetGlobalAddress(
          GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
          CombinedOffset, GA->getTargetFlags());
    } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
      // Ditto: a constant-pool entry's alignment bounds how far off2 may
      // reach without overflowing the low 12 bits.
      Align Alignment = CP->getAlign();
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = CP->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetConstantPool(
          CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
          CombinedOffset, CP->getTargetFlags());
    } else {
      // Any other immediate operand kind cannot be folded.
      continue;
    }

    LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
    LLVM_DEBUG(Base->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\nN: ");
    LLVM_DEBUG(N->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");

    // Modify the offset operand of the load/store.
    if (BaseOpIdx == 0) // Load
      CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                                 N->getOperand(2));
    else // Store
      CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                                 ImmOperand, N->getOperand(3));

    // The add-immediate may now be dead, in which case remove it.
    if (Base.getNode()->use_empty())
      CurDAG->RemoveDeadNode(Base.getNode());
  }
}
1624 
1625 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1626 // for instruction scheduling.
1627 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
1628   return new RISCVDAGToDAGISel(TM);
1629 }
1630