1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #define GET_RISCVMaskedPseudosTable_IMPL
41 #include "RISCVGenSearchableTables.inc"
42 } // namespace RISCV
43 } // namespace llvm
44 
45 void RISCVDAGToDAGISel::PreprocessISelDAG() {
46   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
47                                        E = CurDAG->allnodes_end();
48        I != E;) {
49     SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
50 
51     // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
52     // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
53     if (N->getOpcode() == ISD::SPLAT_VECTOR) {
54       MVT VT = N->getSimpleValueType(0);
55       unsigned Opc =
56           VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
57       SDLoc DL(N);
58       SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
59       SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
60                                        N->getOperand(0), VL);
61 
62       --I;
63       CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
64       ++I;
65       CurDAG->DeleteNode(N);
66       continue;
67     }
68 
69     // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
70     // load. Done after lowering and combining so that we have a chance to
71     // optimize this to VMV_V_X_VL when the upper bits aren't needed.
72     if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
73       continue;
74 
75     assert(N->getNumOperands() == 4 && "Unexpected number of operands");
76     MVT VT = N->getSimpleValueType(0);
77     SDValue Passthru = N->getOperand(0);
78     SDValue Lo = N->getOperand(1);
79     SDValue Hi = N->getOperand(2);
80     SDValue VL = N->getOperand(3);
81     assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
82            Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
83            "Unexpected VTs!");
84     MachineFunction &MF = CurDAG->getMachineFunction();
85     RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
86     SDLoc DL(N);
87 
88     // We use the same frame index we use for moving two i32s into 64-bit FPR.
89     // This is an analogous operation.
90     int FI = FuncInfo->getMoveF64FrameIndex(MF);
91     MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
92     const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
93     SDValue StackSlot =
94         CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
95 
96     SDValue Chain = CurDAG->getEntryNode();
97     Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
98 
99     SDValue OffsetSlot =
100         CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
101     Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
102                           Align(8));
103 
104     Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
105 
106     SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
107     SDValue IntID =
108         CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
109     SDValue Ops[] = {Chain,
110                      IntID,
111                      Passthru,
112                      StackSlot,
113                      CurDAG->getRegister(RISCV::X0, MVT::i64),
114                      VL};
115 
116     SDValue Result = CurDAG->getMemIntrinsicNode(
117         ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
118         MachineMemOperand::MOLoad);
119 
120     // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
121     // vlse we created.  This will cause general havok on the dag because
122     // anything below the conversion could be folded into other existing nodes.
123     // To avoid invalidating 'I', back it up to the convert node.
124     --I;
125     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
126 
127     // Now that we did that, the node is dead.  Increment the iterator to the
128     // next node to process, then delete N.
129     ++I;
130     CurDAG->DeleteNode(N);
131   }
132 }
133 
134 void RISCVDAGToDAGISel::PostprocessISelDAG() {
135   HandleSDNode Dummy(CurDAG->getRoot());
136   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
137 
138   bool MadeChange = false;
139   while (Position != CurDAG->allnodes_begin()) {
140     SDNode *N = &*--Position;
141     // Skip dead nodes and any non-machine opcodes.
142     if (N->use_empty() || !N->isMachineOpcode())
143       continue;
144 
145     MadeChange |= doPeepholeSExtW(N);
146     MadeChange |= doPeepholeLoadStoreADDI(N);
147     MadeChange |= doPeepholeMaskedRVV(N);
148   }
149 
150   CurDAG->setRoot(Dummy.getValue());
151 
152   if (MadeChange)
153     CurDAG->RemoveDeadNodes();
154 }
155 
156 static SDNode *selectImmWithConstantPool(SelectionDAG *CurDAG, const SDLoc &DL,
157                                          const MVT VT, int64_t Imm,
158                                          const RISCVSubtarget &Subtarget) {
159   assert(VT == MVT::i64 && "Expecting MVT::i64");
160   const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
161   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(CurDAG->getConstantPool(
162       ConstantInt::get(EVT(VT).getTypeForEVT(*CurDAG->getContext()), Imm), VT));
163   SDValue Addr = TLI->getAddr(CP, *CurDAG);
164   SDValue Offset = CurDAG->getTargetConstant(0, DL, VT);
165   // Since there is no data race, the chain can be the entry node.
166   SDNode *Load = CurDAG->getMachineNode(RISCV::LD, DL, VT, Addr, Offset,
167                                         CurDAG->getEntryNode());
168   MachineFunction &MF = CurDAG->getMachineFunction();
169   MachineMemOperand *MemOp = MF.getMachineMemOperand(
170       MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
171       LLT(VT), CP->getAlign());
172   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Load), {MemOp});
173   return Load;
174 }
175 
176 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
177                          int64_t Imm, const RISCVSubtarget &Subtarget) {
178   MVT XLenVT = Subtarget.getXLenVT();
179   RISCVMatInt::InstSeq Seq =
180       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
181 
182   // If Imm is expensive to build, then we put it into constant pool.
183   if (Subtarget.useConstantPoolForLargeInts() &&
184       Seq.size() > Subtarget.getMaxBuildIntsCost())
185     return selectImmWithConstantPool(CurDAG, DL, VT, Imm, Subtarget);
186 
187   SDNode *Result = nullptr;
188   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
189   for (RISCVMatInt::Inst &Inst : Seq) {
190     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
191     if (Inst.Opc == RISCV::LUI)
192       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
193     else if (Inst.Opc == RISCV::ADD_UW)
194       Result = CurDAG->getMachineNode(RISCV::ADD_UW, DL, XLenVT, SrcReg,
195                                       CurDAG->getRegister(RISCV::X0, XLenVT));
196     else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
197              Inst.Opc == RISCV::SH3ADD)
198       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
199     else
200       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
201 
202     // Only the first instruction has X0 as its source.
203     SrcReg = SDValue(Result, 0);
204   }
205 
206   return Result;
207 }
208 
209 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
210                                unsigned RegClassID, unsigned SubReg0) {
211   assert(Regs.size() >= 2 && Regs.size() <= 8);
212 
213   SDLoc DL(Regs[0]);
214   SmallVector<SDValue, 8> Ops;
215 
216   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
217 
218   for (unsigned I = 0; I < Regs.size(); ++I) {
219     Ops.push_back(Regs[I]);
220     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
221   }
222   SDNode *N =
223       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
224   return SDValue(N, 0);
225 }
226 
227 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
228                              unsigned NF) {
229   static const unsigned RegClassIDs[] = {
230       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
231       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
232       RISCV::VRN8M1RegClassID};
233 
234   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
235 }
236 
237 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
238                              unsigned NF) {
239   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
240                                          RISCV::VRN3M2RegClassID,
241                                          RISCV::VRN4M2RegClassID};
242 
243   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
244 }
245 
246 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
247                              unsigned NF) {
248   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
249                          RISCV::sub_vrm4_0);
250 }
251 
252 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
253                            unsigned NF, RISCVII::VLMUL LMUL) {
254   switch (LMUL) {
255   default:
256     llvm_unreachable("Invalid LMUL.");
257   case RISCVII::VLMUL::LMUL_F8:
258   case RISCVII::VLMUL::LMUL_F4:
259   case RISCVII::VLMUL::LMUL_F2:
260   case RISCVII::VLMUL::LMUL_1:
261     return createM1Tuple(CurDAG, Regs, NF);
262   case RISCVII::VLMUL::LMUL_2:
263     return createM2Tuple(CurDAG, Regs, NF);
264   case RISCVII::VLMUL::LMUL_4:
265     return createM4Tuple(CurDAG, Regs, NF);
266   }
267 }
268 
269 void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
270     SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
271     bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
272     bool IsLoad, MVT *IndexVT) {
273   SDValue Chain = Node->getOperand(0);
274   SDValue Glue;
275 
276   SDValue Base;
277   SelectBaseAddr(Node->getOperand(CurOp++), Base);
278   Operands.push_back(Base); // Base pointer.
279 
280   if (IsStridedOrIndexed) {
281     Operands.push_back(Node->getOperand(CurOp++)); // Index.
282     if (IndexVT)
283       *IndexVT = Operands.back()->getSimpleValueType(0);
284   }
285 
286   if (IsMasked) {
287     // Mask needs to be copied to V0.
288     SDValue Mask = Node->getOperand(CurOp++);
289     Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
290     Glue = Chain.getValue(1);
291     Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
292   }
293   SDValue VL;
294   selectVLOp(Node->getOperand(CurOp++), VL);
295   Operands.push_back(VL);
296 
297   MVT XLenVT = Subtarget->getXLenVT();
298   SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
299   Operands.push_back(SEWOp);
300 
301   // Masked load has the tail policy argument.
302   if (IsMasked && IsLoad) {
303     // Policy must be a constant.
304     uint64_t Policy = Node->getConstantOperandVal(CurOp++);
305     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
306     Operands.push_back(PolicyOp);
307   }
308 
309   Operands.push_back(Chain); // Chain.
310   if (Glue)
311     Operands.push_back(Glue);
312 }
313 
314 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
315                                     bool IsStrided) {
316   SDLoc DL(Node);
317   unsigned NF = Node->getNumValues() - 1;
318   MVT VT = Node->getSimpleValueType(0);
319   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
320   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
321 
322   unsigned CurOp = 2;
323   SmallVector<SDValue, 8> Operands;
324   if (IsMasked) {
325     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
326                                  Node->op_begin() + CurOp + NF);
327     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
328     Operands.push_back(MaskedOff);
329     CurOp += NF;
330   }
331 
332   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
333                              Operands, /*IsLoad=*/true);
334 
335   const RISCV::VLSEGPseudo *P =
336       RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
337                             static_cast<unsigned>(LMUL));
338   MachineSDNode *Load =
339       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
340 
341   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
342     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
343 
344   SDValue SuperReg = SDValue(Load, 0);
345   for (unsigned I = 0; I < NF; ++I) {
346     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
347     ReplaceUses(SDValue(Node, I),
348                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
349   }
350 
351   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
352   CurDAG->RemoveDeadNode(Node);
353 }
354 
355 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
356   SDLoc DL(Node);
357   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
358   MVT VT = Node->getSimpleValueType(0);
359   MVT XLenVT = Subtarget->getXLenVT();
360   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
361   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
362 
363   unsigned CurOp = 2;
364   SmallVector<SDValue, 7> Operands;
365   if (IsMasked) {
366     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
367                                  Node->op_begin() + CurOp + NF);
368     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
369     Operands.push_back(MaskedOff);
370     CurOp += NF;
371   }
372 
373   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
374                              /*IsStridedOrIndexed*/ false, Operands,
375                              /*IsLoad=*/true);
376 
377   const RISCV::VLSEGPseudo *P =
378       RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
379                             Log2SEW, static_cast<unsigned>(LMUL));
380   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
381                                                MVT::Other, MVT::Glue, Operands);
382   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
383                                           /*Glue*/ SDValue(Load, 2));
384 
385   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
386     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
387 
388   SDValue SuperReg = SDValue(Load, 0);
389   for (unsigned I = 0; I < NF; ++I) {
390     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
391     ReplaceUses(SDValue(Node, I),
392                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
393   }
394 
395   ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
396   ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
397   CurDAG->RemoveDeadNode(Node);
398 }
399 
400 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
401                                      bool IsOrdered) {
402   SDLoc DL(Node);
403   unsigned NF = Node->getNumValues() - 1;
404   MVT VT = Node->getSimpleValueType(0);
405   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
406   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
407 
408   unsigned CurOp = 2;
409   SmallVector<SDValue, 8> Operands;
410   if (IsMasked) {
411     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
412                                  Node->op_begin() + CurOp + NF);
413     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
414     Operands.push_back(MaskedOff);
415     CurOp += NF;
416   }
417 
418   MVT IndexVT;
419   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
420                              /*IsStridedOrIndexed*/ true, Operands,
421                              /*IsLoad=*/true, &IndexVT);
422 
423   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
424          "Element count mismatch");
425 
426   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
427   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
428   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
429     report_fatal_error("The V extension does not support EEW=64 for index "
430                        "values when XLEN=32");
431   }
432   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
433       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
434       static_cast<unsigned>(IndexLMUL));
435   MachineSDNode *Load =
436       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
437 
438   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
439     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
440 
441   SDValue SuperReg = SDValue(Load, 0);
442   for (unsigned I = 0; I < NF; ++I) {
443     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
444     ReplaceUses(SDValue(Node, I),
445                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
446   }
447 
448   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
449   CurDAG->RemoveDeadNode(Node);
450 }
451 
452 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
453                                     bool IsStrided) {
454   SDLoc DL(Node);
455   unsigned NF = Node->getNumOperands() - 4;
456   if (IsStrided)
457     NF--;
458   if (IsMasked)
459     NF--;
460   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
461   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
462   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
463   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
464   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
465 
466   SmallVector<SDValue, 8> Operands;
467   Operands.push_back(StoreVal);
468   unsigned CurOp = 2 + NF;
469 
470   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
471                              Operands);
472 
473   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
474       NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
475   MachineSDNode *Store =
476       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
477 
478   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
479     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
480 
481   ReplaceNode(Node, Store);
482 }
483 
484 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
485                                      bool IsOrdered) {
486   SDLoc DL(Node);
487   unsigned NF = Node->getNumOperands() - 5;
488   if (IsMasked)
489     --NF;
490   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
491   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
492   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
493   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
494   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
495 
496   SmallVector<SDValue, 8> Operands;
497   Operands.push_back(StoreVal);
498   unsigned CurOp = 2 + NF;
499 
500   MVT IndexVT;
501   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
502                              /*IsStridedOrIndexed*/ true, Operands,
503                              /*IsLoad=*/false, &IndexVT);
504 
505   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
506          "Element count mismatch");
507 
508   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
509   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
510   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
511     report_fatal_error("The V extension does not support EEW=64 for index "
512                        "values when XLEN=32");
513   }
514   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
515       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
516       static_cast<unsigned>(IndexLMUL));
517   MachineSDNode *Store =
518       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
519 
520   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
521     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
522 
523   ReplaceNode(Node, Store);
524 }
525 
526 void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
527   if (!Subtarget->hasVInstructions())
528     return;
529 
530   assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
531           Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
532          "Unexpected opcode");
533 
534   SDLoc DL(Node);
535   MVT XLenVT = Subtarget->getXLenVT();
536 
537   bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
538   unsigned IntNoOffset = HasChain ? 1 : 0;
539   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
540 
541   assert((IntNo == Intrinsic::riscv_vsetvli ||
542           IntNo == Intrinsic::riscv_vsetvlimax ||
543           IntNo == Intrinsic::riscv_vsetvli_opt ||
544           IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
545          "Unexpected vsetvli intrinsic");
546 
547   bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
548                IntNo == Intrinsic::riscv_vsetvlimax_opt;
549   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
550 
551   assert(Node->getNumOperands() == Offset + 2 &&
552          "Unexpected number of operands");
553 
554   unsigned SEW =
555       RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
556   RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
557       Node->getConstantOperandVal(Offset + 1) & 0x7);
558 
559   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
560                                             /*MaskAgnostic*/ false);
561   SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
562 
563   SmallVector<EVT, 2> VTs = {XLenVT};
564   if (HasChain)
565     VTs.push_back(MVT::Other);
566 
567   SDValue VLOperand;
568   unsigned Opcode = RISCV::PseudoVSETVLI;
569   if (VLMax) {
570     VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
571     Opcode = RISCV::PseudoVSETVLIX0;
572   } else {
573     VLOperand = Node->getOperand(IntNoOffset + 1);
574 
575     if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
576       uint64_t AVL = C->getZExtValue();
577       if (isUInt<5>(AVL)) {
578         SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
579         SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
580         if (HasChain)
581           Ops.push_back(Node->getOperand(0));
582         ReplaceNode(
583             Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
584         return;
585       }
586     }
587   }
588 
589   SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
590   if (HasChain)
591     Ops.push_back(Node->getOperand(0));
592 
593   ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
594 }
595 
596 void RISCVDAGToDAGISel::Select(SDNode *Node) {
597   // If we have a custom node, we have already selected.
598   if (Node->isMachineOpcode()) {
599     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
600     Node->setNodeId(-1);
601     return;
602   }
603 
604   // Instruction Selection not handled by the auto-generated tablegen selection
605   // should be handled here.
606   unsigned Opcode = Node->getOpcode();
607   MVT XLenVT = Subtarget->getXLenVT();
608   SDLoc DL(Node);
609   MVT VT = Node->getSimpleValueType(0);
610 
611   switch (Opcode) {
612   case ISD::Constant: {
613     auto *ConstNode = cast<ConstantSDNode>(Node);
614     if (VT == XLenVT && ConstNode->isZero()) {
615       SDValue New =
616           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
617       ReplaceNode(Node, New.getNode());
618       return;
619     }
620     int64_t Imm = ConstNode->getSExtValue();
621     // If the upper XLen-16 bits are not used, try to convert this to a simm12
622     // by sign extending bit 15.
623     if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
624         hasAllHUsers(Node))
625       Imm = SignExtend64(Imm, 16);
626     // If the upper 32-bits are not used try to convert this into a simm32 by
627     // sign extending bit 32.
628     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
629       Imm = SignExtend64(Imm, 32);
630 
631     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
632     return;
633   }
634   case ISD::FrameIndex: {
635     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
636     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
637     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
638     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
639     return;
640   }
641   case ISD::SRL: {
642     // Optimize (srl (and X, C2), C) ->
643     //          (srli (slli X, (XLen-C3), (XLen-C3) + C)
644     // Where C2 is a mask with C3 trailing ones.
645     // Taking into account that the C2 may have had lower bits unset by
646     // SimplifyDemandedBits. This avoids materializing the C2 immediate.
647     // This pattern occurs when type legalizing right shifts for types with
648     // less than XLen bits.
649     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
650     if (!N1C)
651       break;
652     SDValue N0 = Node->getOperand(0);
653     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
654         !isa<ConstantSDNode>(N0.getOperand(1)))
655       break;
656     unsigned ShAmt = N1C->getZExtValue();
657     uint64_t Mask = N0.getConstantOperandVal(1);
658     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
659     if (!isMask_64(Mask))
660       break;
661     unsigned TrailingOnes = countTrailingOnes(Mask);
662     // 32 trailing ones should use srliw via tablegen pattern.
663     if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
664       break;
665     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
666     SDNode *SLLI =
667         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
668                                CurDAG->getTargetConstant(LShAmt, DL, VT));
669     SDNode *SRLI = CurDAG->getMachineNode(
670         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
671         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
672     ReplaceNode(Node, SRLI);
673     return;
674   }
675   case ISD::SRA: {
676     // Optimize (sra (sext_inreg X, i16), C) ->
677     //          (srai (slli X, (XLen-16), (XLen-16) + C)
678     // And      (sra (sext_inreg X, i8), C) ->
679     //          (srai (slli X, (XLen-8), (XLen-8) + C)
680     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
681     // This transform matches the code we get without Zbb. The shifts are more
682     // compressible, and this can help expose CSE opportunities in the sdiv by
683     // constant optimization.
684     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
685     if (!N1C)
686       break;
687     SDValue N0 = Node->getOperand(0);
688     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
689       break;
690     unsigned ShAmt = N1C->getZExtValue();
691     unsigned ExtSize =
692         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
693     // ExtSize of 32 should use sraiw via tablegen pattern.
694     if (ExtSize >= 32 || ShAmt >= ExtSize)
695       break;
696     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
697     SDNode *SLLI =
698         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
699                                CurDAG->getTargetConstant(LShAmt, DL, VT));
700     SDNode *SRAI = CurDAG->getMachineNode(
701         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
702         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
703     ReplaceNode(Node, SRAI);
704     return;
705   }
706   case ISD::AND: {
707     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
708     if (!N1C)
709       break;
710 
711     SDValue N0 = Node->getOperand(0);
712 
713     bool LeftShift = N0.getOpcode() == ISD::SHL;
714     if (!LeftShift && N0.getOpcode() != ISD::SRL)
715       break;
716 
717     auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
718     if (!C)
719       break;
720     uint64_t C2 = C->getZExtValue();
721     unsigned XLen = Subtarget->getXLen();
722     if (!C2 || C2 >= XLen)
723       break;
724 
725     uint64_t C1 = N1C->getZExtValue();
726 
727     // Keep track of whether this is an andi.
728     bool IsANDI = isInt<12>(N1C->getSExtValue());
729 
730     // Clear irrelevant bits in the mask.
731     if (LeftShift)
732       C1 &= maskTrailingZeros<uint64_t>(C2);
733     else
734       C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
735 
736     // Some transforms should only be done if the shift has a single use or
737     // the AND would become (srli (slli X, 32), 32)
738     bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
739 
740     SDValue X = N0.getOperand(0);
741 
742     // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
743     // with c3 leading zeros.
744     if (!LeftShift && isMask_64(C1)) {
745       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
746       if (C2 < C3) {
747         // If the number of leading zeros is C2+32 this can be SRLIW.
748         if (C2 + 32 == C3) {
749           SDNode *SRLIW =
750               CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
751                                      CurDAG->getTargetConstant(C2, DL, XLenVT));
752           ReplaceNode(Node, SRLIW);
753           return;
754         }
755 
756         // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
757         // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
758         //
759         // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
760         // legalized and goes through DAG combine.
761         if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
762             X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
763             cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
764           SDNode *SRAIW =
765               CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, X.getOperand(0),
766                                      CurDAG->getTargetConstant(31, DL, XLenVT));
767           SDNode *SRLIW = CurDAG->getMachineNode(
768               RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
769               CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
770           ReplaceNode(Node, SRLIW);
771           return;
772         }
773 
774         // (srli (slli x, c3-c2), c3).
775         // Skip it in order to select sraiw.
776         bool Skip = Subtarget->hasStdExtZba() && C3 == 32 &&
777                     X.getOpcode() == ISD::SIGN_EXTEND_INREG;
778         if (OneUseOrZExtW && !IsANDI && !Skip) {
779           SDNode *SLLI = CurDAG->getMachineNode(
780               RISCV::SLLI, DL, XLenVT, X,
781               CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
782           SDNode *SRLI =
783               CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
784                                      CurDAG->getTargetConstant(C3, DL, XLenVT));
785           ReplaceNode(Node, SRLI);
786           return;
787         }
788       }
789     }
790 
791     // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
792     // shifted by c2 bits with c3 leading zeros.
793     if (LeftShift && isShiftedMask_64(C1)) {
794       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
795 
796       if (C2 + C3 < XLen &&
797           C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
798         // Use slli.uw when possible.
799         if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
800           SDNode *SLLI_UW =
801               CurDAG->getMachineNode(RISCV::SLLI_UW, DL, XLenVT, X,
802                                      CurDAG->getTargetConstant(C2, DL, XLenVT));
803           ReplaceNode(Node, SLLI_UW);
804           return;
805         }
806 
807         // (srli (slli c2+c3), c3)
808         if (OneUseOrZExtW && !IsANDI) {
809           SDNode *SLLI = CurDAG->getMachineNode(
810               RISCV::SLLI, DL, XLenVT, X,
811               CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
812           SDNode *SRLI =
813               CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
814                                      CurDAG->getTargetConstant(C3, DL, XLenVT));
815           ReplaceNode(Node, SRLI);
816           return;
817         }
818       }
819     }
820 
821     // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
822     // shifted mask with c2 leading zeros and c3 trailing zeros.
823     if (!LeftShift && isShiftedMask_64(C1)) {
824       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
825       uint64_t C3 = countTrailingZeros(C1);
826       if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsANDI) {
827         SDNode *SRLI = CurDAG->getMachineNode(
828             RISCV::SRLI, DL, XLenVT, X,
829             CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
830         SDNode *SLLI =
831             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
832                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
833         ReplaceNode(Node, SLLI);
834         return;
835       }
836       // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
837       if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
838           OneUseOrZExtW && !IsANDI) {
839         SDNode *SRLIW = CurDAG->getMachineNode(
840             RISCV::SRLIW, DL, XLenVT, X,
841             CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
842         SDNode *SLLI =
843             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
844                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
845         ReplaceNode(Node, SLLI);
846         return;
847       }
848     }
849 
850     // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
851     // shifted mask with no leading zeros and c3 trailing zeros.
852     if (LeftShift && isShiftedMask_64(C1)) {
853       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
854       uint64_t C3 = countTrailingZeros(C1);
855       if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsANDI) {
856         SDNode *SRLI = CurDAG->getMachineNode(
857             RISCV::SRLI, DL, XLenVT, X,
858             CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
859         SDNode *SLLI =
860             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
861                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
862         ReplaceNode(Node, SLLI);
863         return;
864       }
865       // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
866       if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsANDI) {
867         SDNode *SRLIW = CurDAG->getMachineNode(
868             RISCV::SRLIW, DL, XLenVT, X,
869             CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
870         SDNode *SLLI =
871             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
872                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
873         ReplaceNode(Node, SLLI);
874         return;
875       }
876     }
877 
878     break;
879   }
880   case ISD::MUL: {
881     // Special case for calculating (mul (and X, C2), C1) where the full product
882     // fits in XLen bits. We can shift X left by the number of leading zeros in
883     // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
884     // product has XLen trailing zeros, putting it in the output of MULHU. This
885     // can avoid materializing a constant in a register for C2.
886 
887     // RHS should be a constant.
888     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
889     if (!N1C || !N1C->hasOneUse())
890       break;
891 
892     // LHS should be an AND with constant.
893     SDValue N0 = Node->getOperand(0);
894     if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
895       break;
896 
897     uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
898 
899     // Constant should be a mask.
900     if (!isMask_64(C2))
901       break;
902 
903     // This should be the only use of the AND unless we will use
904     // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
905     // constants.
906     if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
907       break;
908 
909     // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
910     // optimization.
911     if (isInt<12>(C2) ||
912         (C2 == UINT64_C(0xFFFF) &&
913          (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
914         (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
915       break;
916 
917     // We need to shift left the AND input and C1 by a total of XLen bits.
918 
919     // How far left do we need to shift the AND input?
920     unsigned XLen = Subtarget->getXLen();
921     unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));
922 
923     // The constant gets shifted by the remaining amount unless that would
924     // shift bits out.
925     uint64_t C1 = N1C->getZExtValue();
926     unsigned ConstantShift = XLen - LeadingZeros;
927     if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
928       break;
929 
930     uint64_t ShiftedC1 = C1 << ConstantShift;
931     // If this RV32, we need to sign extend the constant.
932     if (XLen == 32)
933       ShiftedC1 = SignExtend64(ShiftedC1, 32);
934 
935     // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
936     SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
937     SDNode *SLLI =
938         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
939                                CurDAG->getTargetConstant(LeadingZeros, DL, VT));
940     SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
941                                            SDValue(SLLI, 0), SDValue(Imm, 0));
942     ReplaceNode(Node, MULHU);
943     return;
944   }
945   case ISD::INTRINSIC_WO_CHAIN: {
946     unsigned IntNo = Node->getConstantOperandVal(0);
947     switch (IntNo) {
948       // By default we do not custom select any intrinsic.
949     default:
950       break;
951     case Intrinsic::riscv_vmsgeu:
952     case Intrinsic::riscv_vmsge: {
953       SDValue Src1 = Node->getOperand(1);
954       SDValue Src2 = Node->getOperand(2);
955       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
956       bool IsCmpUnsignedZero = false;
957       // Only custom select scalar second operand.
958       if (Src2.getValueType() != XLenVT)
959         break;
960       // Small constants are handled with patterns.
961       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
962         int64_t CVal = C->getSExtValue();
963         if (CVal >= -15 && CVal <= 16) {
964           if (!IsUnsigned || CVal != 0)
965             break;
966           IsCmpUnsignedZero = true;
967         }
968       }
969       MVT Src1VT = Src1.getSimpleValueType();
970       unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
971       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
972       default:
973         llvm_unreachable("Unexpected LMUL!");
974 #define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
975   case RISCVII::VLMUL::lmulenum:                                               \
976     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
977                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
978     VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
979     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
980     break;
981         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
982         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
983         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
984         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
985         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
986         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
987         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
988 #undef CASE_VMSLT_VMNAND_VMSET_OPCODES
989       }
990       SDValue SEW = CurDAG->getTargetConstant(
991           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
992       SDValue VL;
993       selectVLOp(Node->getOperand(3), VL);
994 
995       // If vmsgeu with 0 immediate, expand it to vmset.
996       if (IsCmpUnsignedZero) {
997         ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
998         return;
999       }
1000 
1001       // Expand to
1002       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
1003       SDValue Cmp = SDValue(
1004           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1005           0);
1006       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
1007                                                {Cmp, Cmp, VL, SEW}));
1008       return;
1009     }
1010     case Intrinsic::riscv_vmsgeu_mask:
1011     case Intrinsic::riscv_vmsge_mask: {
1012       SDValue Src1 = Node->getOperand(2);
1013       SDValue Src2 = Node->getOperand(3);
1014       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1015       bool IsCmpUnsignedZero = false;
1016       // Only custom select scalar second operand.
1017       if (Src2.getValueType() != XLenVT)
1018         break;
1019       // Small constants are handled with patterns.
1020       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1021         int64_t CVal = C->getSExtValue();
1022         if (CVal >= -15 && CVal <= 16) {
1023           if (!IsUnsigned || CVal != 0)
1024             break;
1025           IsCmpUnsignedZero = true;
1026         }
1027       }
1028       MVT Src1VT = Src1.getSimpleValueType();
1029       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1030           VMOROpcode;
1031       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
1032       default:
1033         llvm_unreachable("Unexpected LMUL!");
1034 #define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
1035   case RISCVII::VLMUL::lmulenum:                                               \
1036     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
1037                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
1038     VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
1039                                  : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
1040     break;
1041         CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
1042         CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
1043         CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
1044         CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
1045         CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
1046         CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
1047         CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
1048 #undef CASE_VMSLT_OPCODES
1049       }
1050       // Mask operations use the LMUL from the mask type.
1051       switch (RISCVTargetLowering::getLMUL(VT)) {
1052       default:
1053         llvm_unreachable("Unexpected LMUL!");
1054 #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
1055   case RISCVII::VLMUL::lmulenum:                                               \
1056     VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
1057     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
1058     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
1059     break;
1060         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
1061         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
1062         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
1063         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
1064         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
1065         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
1066         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
1067 #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1068       }
1069       SDValue SEW = CurDAG->getTargetConstant(
1070           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
1071       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
1072       SDValue VL;
1073       selectVLOp(Node->getOperand(5), VL);
1074       SDValue MaskedOff = Node->getOperand(1);
1075       SDValue Mask = Node->getOperand(4);
1076 
1077       // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
1078       if (IsCmpUnsignedZero) {
1079         // We don't need vmor if the MaskedOff and the Mask are the same
1080         // value.
1081         if (Mask == MaskedOff) {
1082           ReplaceUses(Node, Mask.getNode());
1083           return;
1084         }
1085         ReplaceNode(Node,
1086                     CurDAG->getMachineNode(VMOROpcode, DL, VT,
1087                                            {Mask, MaskedOff, VL, MaskSEW}));
1088         return;
1089       }
1090 
1091       // If the MaskedOff value and the Mask are the same value use
1092       // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
1093       // This avoids needing to copy v0 to vd before starting the next sequence.
1094       if (Mask == MaskedOff) {
1095         SDValue Cmp = SDValue(
1096             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1097             0);
1098         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
1099                                                  {Mask, Cmp, VL, MaskSEW}));
1100         return;
1101       }
1102 
1103       // Mask needs to be copied to V0.
1104       SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
1105                                            RISCV::V0, Mask, SDValue());
1106       SDValue Glue = Chain.getValue(1);
1107       SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
1108 
1109       // Otherwise use
1110       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
1111       SDValue Cmp = SDValue(
1112           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
1113                                  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1114           0);
1115       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
1116                                                {Cmp, Mask, VL, MaskSEW}));
1117       return;
1118     }
1119     case Intrinsic::riscv_vsetvli_opt:
1120     case Intrinsic::riscv_vsetvlimax_opt:
1121       return selectVSETVLI(Node);
1122     }
1123     break;
1124   }
1125   case ISD::INTRINSIC_W_CHAIN: {
1126     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1127     switch (IntNo) {
1128       // By default we do not custom select any intrinsic.
1129     default:
1130       break;
1131     case Intrinsic::riscv_vsetvli:
1132     case Intrinsic::riscv_vsetvlimax:
1133       return selectVSETVLI(Node);
1134     case Intrinsic::riscv_vlseg2:
1135     case Intrinsic::riscv_vlseg3:
1136     case Intrinsic::riscv_vlseg4:
1137     case Intrinsic::riscv_vlseg5:
1138     case Intrinsic::riscv_vlseg6:
1139     case Intrinsic::riscv_vlseg7:
1140     case Intrinsic::riscv_vlseg8: {
1141       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1142       return;
1143     }
1144     case Intrinsic::riscv_vlseg2_mask:
1145     case Intrinsic::riscv_vlseg3_mask:
1146     case Intrinsic::riscv_vlseg4_mask:
1147     case Intrinsic::riscv_vlseg5_mask:
1148     case Intrinsic::riscv_vlseg6_mask:
1149     case Intrinsic::riscv_vlseg7_mask:
1150     case Intrinsic::riscv_vlseg8_mask: {
1151       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1152       return;
1153     }
1154     case Intrinsic::riscv_vlsseg2:
1155     case Intrinsic::riscv_vlsseg3:
1156     case Intrinsic::riscv_vlsseg4:
1157     case Intrinsic::riscv_vlsseg5:
1158     case Intrinsic::riscv_vlsseg6:
1159     case Intrinsic::riscv_vlsseg7:
1160     case Intrinsic::riscv_vlsseg8: {
1161       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1162       return;
1163     }
1164     case Intrinsic::riscv_vlsseg2_mask:
1165     case Intrinsic::riscv_vlsseg3_mask:
1166     case Intrinsic::riscv_vlsseg4_mask:
1167     case Intrinsic::riscv_vlsseg5_mask:
1168     case Intrinsic::riscv_vlsseg6_mask:
1169     case Intrinsic::riscv_vlsseg7_mask:
1170     case Intrinsic::riscv_vlsseg8_mask: {
1171       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1172       return;
1173     }
1174     case Intrinsic::riscv_vloxseg2:
1175     case Intrinsic::riscv_vloxseg3:
1176     case Intrinsic::riscv_vloxseg4:
1177     case Intrinsic::riscv_vloxseg5:
1178     case Intrinsic::riscv_vloxseg6:
1179     case Intrinsic::riscv_vloxseg7:
1180     case Intrinsic::riscv_vloxseg8:
1181       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1182       return;
1183     case Intrinsic::riscv_vluxseg2:
1184     case Intrinsic::riscv_vluxseg3:
1185     case Intrinsic::riscv_vluxseg4:
1186     case Intrinsic::riscv_vluxseg5:
1187     case Intrinsic::riscv_vluxseg6:
1188     case Intrinsic::riscv_vluxseg7:
1189     case Intrinsic::riscv_vluxseg8:
1190       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1191       return;
1192     case Intrinsic::riscv_vloxseg2_mask:
1193     case Intrinsic::riscv_vloxseg3_mask:
1194     case Intrinsic::riscv_vloxseg4_mask:
1195     case Intrinsic::riscv_vloxseg5_mask:
1196     case Intrinsic::riscv_vloxseg6_mask:
1197     case Intrinsic::riscv_vloxseg7_mask:
1198     case Intrinsic::riscv_vloxseg8_mask:
1199       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1200       return;
1201     case Intrinsic::riscv_vluxseg2_mask:
1202     case Intrinsic::riscv_vluxseg3_mask:
1203     case Intrinsic::riscv_vluxseg4_mask:
1204     case Intrinsic::riscv_vluxseg5_mask:
1205     case Intrinsic::riscv_vluxseg6_mask:
1206     case Intrinsic::riscv_vluxseg7_mask:
1207     case Intrinsic::riscv_vluxseg8_mask:
1208       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1209       return;
1210     case Intrinsic::riscv_vlseg8ff:
1211     case Intrinsic::riscv_vlseg7ff:
1212     case Intrinsic::riscv_vlseg6ff:
1213     case Intrinsic::riscv_vlseg5ff:
1214     case Intrinsic::riscv_vlseg4ff:
1215     case Intrinsic::riscv_vlseg3ff:
1216     case Intrinsic::riscv_vlseg2ff: {
1217       selectVLSEGFF(Node, /*IsMasked*/ false);
1218       return;
1219     }
1220     case Intrinsic::riscv_vlseg8ff_mask:
1221     case Intrinsic::riscv_vlseg7ff_mask:
1222     case Intrinsic::riscv_vlseg6ff_mask:
1223     case Intrinsic::riscv_vlseg5ff_mask:
1224     case Intrinsic::riscv_vlseg4ff_mask:
1225     case Intrinsic::riscv_vlseg3ff_mask:
1226     case Intrinsic::riscv_vlseg2ff_mask: {
1227       selectVLSEGFF(Node, /*IsMasked*/ true);
1228       return;
1229     }
1230     case Intrinsic::riscv_vloxei:
1231     case Intrinsic::riscv_vloxei_mask:
1232     case Intrinsic::riscv_vluxei:
1233     case Intrinsic::riscv_vluxei_mask: {
1234       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1235                       IntNo == Intrinsic::riscv_vluxei_mask;
1236       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1237                        IntNo == Intrinsic::riscv_vloxei_mask;
1238 
1239       MVT VT = Node->getSimpleValueType(0);
1240       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1241 
1242       unsigned CurOp = 2;
1243       // Masked intrinsic only have TU version pseduo instructions.
1244       bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
1245       SmallVector<SDValue, 8> Operands;
1246       if (IsTU)
1247         Operands.push_back(Node->getOperand(CurOp++));
1248       else
1249         // Skip the undef passthru operand for nomask TA version pseudo
1250         CurOp++;
1251 
1252       MVT IndexVT;
1253       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1254                                  /*IsStridedOrIndexed*/ true, Operands,
1255                                  /*IsLoad=*/true, &IndexVT);
1256 
1257       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1258              "Element count mismatch");
1259 
1260       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1261       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1262       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1263       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1264         report_fatal_error("The V extension does not support EEW=64 for index "
1265                            "values when XLEN=32");
1266       }
1267       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1268           IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1269           static_cast<unsigned>(IndexLMUL));
1270       MachineSDNode *Load =
1271           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1272 
1273       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1274         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1275 
1276       ReplaceNode(Node, Load);
1277       return;
1278     }
1279     case Intrinsic::riscv_vlm:
1280     case Intrinsic::riscv_vle:
1281     case Intrinsic::riscv_vle_mask:
1282     case Intrinsic::riscv_vlse:
1283     case Intrinsic::riscv_vlse_mask: {
1284       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1285                       IntNo == Intrinsic::riscv_vlse_mask;
1286       bool IsStrided =
1287           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1288 
1289       MVT VT = Node->getSimpleValueType(0);
1290       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1291 
1292       unsigned CurOp = 2;
1293       // The riscv_vlm intrinsic are always tail agnostic and no passthru operand.
1294       bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1295       // Masked intrinsic only have TU version pseduo instructions.
1296       bool IsTU =
1297           HasPassthruOperand &&
1298           ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
1299       SmallVector<SDValue, 8> Operands;
1300       if (IsTU)
1301         Operands.push_back(Node->getOperand(CurOp++));
1302       else if (HasPassthruOperand)
1303         // Skip the undef passthru operand for nomask TA version pseudo
1304         CurOp++;
1305 
1306       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1307                                  Operands, /*IsLoad=*/true);
1308 
1309       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1310       const RISCV::VLEPseudo *P =
1311           RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
1312                               static_cast<unsigned>(LMUL));
1313       MachineSDNode *Load =
1314           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1315 
1316       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1317         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1318 
1319       ReplaceNode(Node, Load);
1320       return;
1321     }
1322     case Intrinsic::riscv_vleff:
1323     case Intrinsic::riscv_vleff_mask: {
1324       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1325 
1326       MVT VT = Node->getSimpleValueType(0);
1327       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1328 
1329       unsigned CurOp = 2;
1330       // Masked intrinsic only have TU version pseduo instructions.
1331       bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
1332       SmallVector<SDValue, 7> Operands;
1333       if (IsTU)
1334         Operands.push_back(Node->getOperand(CurOp++));
1335       else
1336         // Skip the undef passthru operand for nomask TA version pseudo
1337         CurOp++;
1338 
1339       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1340                                  /*IsStridedOrIndexed*/ false, Operands,
1341                                  /*IsLoad=*/true);
1342 
1343       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1344       const RISCV::VLEPseudo *P =
1345           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
1346                               Log2SEW, static_cast<unsigned>(LMUL));
1347       MachineSDNode *Load =
1348           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1349                                  MVT::Other, MVT::Glue, Operands);
1350       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1351                                               /*Glue*/ SDValue(Load, 2));
1352 
1353       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1354         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1355 
1356       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1357       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1358       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
1359       CurDAG->RemoveDeadNode(Node);
1360       return;
1361     }
1362     }
1363     break;
1364   }
1365   case ISD::INTRINSIC_VOID: {
1366     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1367     switch (IntNo) {
1368     case Intrinsic::riscv_vsseg2:
1369     case Intrinsic::riscv_vsseg3:
1370     case Intrinsic::riscv_vsseg4:
1371     case Intrinsic::riscv_vsseg5:
1372     case Intrinsic::riscv_vsseg6:
1373     case Intrinsic::riscv_vsseg7:
1374     case Intrinsic::riscv_vsseg8: {
1375       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1376       return;
1377     }
1378     case Intrinsic::riscv_vsseg2_mask:
1379     case Intrinsic::riscv_vsseg3_mask:
1380     case Intrinsic::riscv_vsseg4_mask:
1381     case Intrinsic::riscv_vsseg5_mask:
1382     case Intrinsic::riscv_vsseg6_mask:
1383     case Intrinsic::riscv_vsseg7_mask:
1384     case Intrinsic::riscv_vsseg8_mask: {
1385       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1386       return;
1387     }
1388     case Intrinsic::riscv_vssseg2:
1389     case Intrinsic::riscv_vssseg3:
1390     case Intrinsic::riscv_vssseg4:
1391     case Intrinsic::riscv_vssseg5:
1392     case Intrinsic::riscv_vssseg6:
1393     case Intrinsic::riscv_vssseg7:
1394     case Intrinsic::riscv_vssseg8: {
1395       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1396       return;
1397     }
1398     case Intrinsic::riscv_vssseg2_mask:
1399     case Intrinsic::riscv_vssseg3_mask:
1400     case Intrinsic::riscv_vssseg4_mask:
1401     case Intrinsic::riscv_vssseg5_mask:
1402     case Intrinsic::riscv_vssseg6_mask:
1403     case Intrinsic::riscv_vssseg7_mask:
1404     case Intrinsic::riscv_vssseg8_mask: {
1405       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1406       return;
1407     }
1408     case Intrinsic::riscv_vsoxseg2:
1409     case Intrinsic::riscv_vsoxseg3:
1410     case Intrinsic::riscv_vsoxseg4:
1411     case Intrinsic::riscv_vsoxseg5:
1412     case Intrinsic::riscv_vsoxseg6:
1413     case Intrinsic::riscv_vsoxseg7:
1414     case Intrinsic::riscv_vsoxseg8:
1415       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1416       return;
1417     case Intrinsic::riscv_vsuxseg2:
1418     case Intrinsic::riscv_vsuxseg3:
1419     case Intrinsic::riscv_vsuxseg4:
1420     case Intrinsic::riscv_vsuxseg5:
1421     case Intrinsic::riscv_vsuxseg6:
1422     case Intrinsic::riscv_vsuxseg7:
1423     case Intrinsic::riscv_vsuxseg8:
1424       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1425       return;
1426     case Intrinsic::riscv_vsoxseg2_mask:
1427     case Intrinsic::riscv_vsoxseg3_mask:
1428     case Intrinsic::riscv_vsoxseg4_mask:
1429     case Intrinsic::riscv_vsoxseg5_mask:
1430     case Intrinsic::riscv_vsoxseg6_mask:
1431     case Intrinsic::riscv_vsoxseg7_mask:
1432     case Intrinsic::riscv_vsoxseg8_mask:
1433       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1434       return;
1435     case Intrinsic::riscv_vsuxseg2_mask:
1436     case Intrinsic::riscv_vsuxseg3_mask:
1437     case Intrinsic::riscv_vsuxseg4_mask:
1438     case Intrinsic::riscv_vsuxseg5_mask:
1439     case Intrinsic::riscv_vsuxseg6_mask:
1440     case Intrinsic::riscv_vsuxseg7_mask:
1441     case Intrinsic::riscv_vsuxseg8_mask:
1442       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1443       return;
1444     case Intrinsic::riscv_vsoxei:
1445     case Intrinsic::riscv_vsoxei_mask:
1446     case Intrinsic::riscv_vsuxei:
1447     case Intrinsic::riscv_vsuxei_mask: {
1448       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1449                       IntNo == Intrinsic::riscv_vsuxei_mask;
1450       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1451                        IntNo == Intrinsic::riscv_vsoxei_mask;
1452 
1453       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1454       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1455 
1456       unsigned CurOp = 2;
1457       SmallVector<SDValue, 8> Operands;
1458       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1459 
1460       MVT IndexVT;
1461       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1462                                  /*IsStridedOrIndexed*/ true, Operands,
1463                                  /*IsLoad=*/false, &IndexVT);
1464 
1465       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1466              "Element count mismatch");
1467 
1468       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1469       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1470       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1471       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1472         report_fatal_error("The V extension does not support EEW=64 for index "
1473                            "values when XLEN=32");
1474       }
1475       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1476           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
1477           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
1478       MachineSDNode *Store =
1479           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1480 
1481       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1482         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1483 
1484       ReplaceNode(Node, Store);
1485       return;
1486     }
1487     case Intrinsic::riscv_vsm:
1488     case Intrinsic::riscv_vse:
1489     case Intrinsic::riscv_vse_mask:
1490     case Intrinsic::riscv_vsse:
1491     case Intrinsic::riscv_vsse_mask: {
1492       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1493                       IntNo == Intrinsic::riscv_vsse_mask;
1494       bool IsStrided =
1495           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1496 
1497       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1498       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1499 
1500       unsigned CurOp = 2;
1501       SmallVector<SDValue, 8> Operands;
1502       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1503 
1504       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1505                                  Operands);
1506 
1507       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1508       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1509           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1510       MachineSDNode *Store =
1511           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1512       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1513         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1514 
1515       ReplaceNode(Node, Store);
1516       return;
1517     }
1518     }
1519     break;
1520   }
1521   case ISD::BITCAST: {
1522     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1523     // Just drop bitcasts between vectors if both are fixed or both are
1524     // scalable.
1525     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1526         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1527       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1528       CurDAG->RemoveDeadNode(Node);
1529       return;
1530     }
1531     break;
1532   }
1533   case ISD::INSERT_SUBVECTOR: {
1534     SDValue V = Node->getOperand(0);
1535     SDValue SubV = Node->getOperand(1);
1536     SDLoc DL(SubV);
1537     auto Idx = Node->getConstantOperandVal(2);
1538     MVT SubVecVT = SubV.getSimpleValueType();
1539 
1540     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1541     MVT SubVecContainerVT = SubVecVT;
1542     // Establish the correct scalable-vector types for any fixed-length type.
1543     if (SubVecVT.isFixedLengthVector())
1544       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1545     if (VT.isFixedLengthVector())
1546       VT = TLI.getContainerForFixedLengthVector(VT);
1547 
1548     const auto *TRI = Subtarget->getRegisterInfo();
1549     unsigned SubRegIdx;
1550     std::tie(SubRegIdx, Idx) =
1551         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1552             VT, SubVecContainerVT, Idx, TRI);
1553 
1554     // If the Idx hasn't been completely eliminated then this is a subvector
1555     // insert which doesn't naturally align to a vector register. These must
1556     // be handled using instructions to manipulate the vector registers.
1557     if (Idx != 0)
1558       break;
1559 
1560     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1561     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1562                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1563                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1564     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1565     assert((!IsSubVecPartReg || V.isUndef()) &&
1566            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1567            "the subvector is smaller than a full-sized register");
1568 
1569     // If we haven't set a SubRegIdx, then we must be going between
1570     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1571     if (SubRegIdx == RISCV::NoSubRegister) {
1572       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1573       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1574                  InRegClassID &&
1575              "Unexpected subvector extraction");
1576       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1577       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1578                                                DL, VT, SubV, RC);
1579       ReplaceNode(Node, NewNode);
1580       return;
1581     }
1582 
1583     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1584     ReplaceNode(Node, Insert.getNode());
1585     return;
1586   }
1587   case ISD::EXTRACT_SUBVECTOR: {
1588     SDValue V = Node->getOperand(0);
1589     auto Idx = Node->getConstantOperandVal(1);
1590     MVT InVT = V.getSimpleValueType();
1591     SDLoc DL(V);
1592 
1593     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1594     MVT SubVecContainerVT = VT;
1595     // Establish the correct scalable-vector types for any fixed-length type.
1596     if (VT.isFixedLengthVector())
1597       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1598     if (InVT.isFixedLengthVector())
1599       InVT = TLI.getContainerForFixedLengthVector(InVT);
1600 
1601     const auto *TRI = Subtarget->getRegisterInfo();
1602     unsigned SubRegIdx;
1603     std::tie(SubRegIdx, Idx) =
1604         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1605             InVT, SubVecContainerVT, Idx, TRI);
1606 
1607     // If the Idx hasn't been completely eliminated then this is a subvector
1608     // extract which doesn't naturally align to a vector register. These must
1609     // be handled using instructions to manipulate the vector registers.
1610     if (Idx != 0)
1611       break;
1612 
1613     // If we haven't set a SubRegIdx, then we must be going between
1614     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1615     if (SubRegIdx == RISCV::NoSubRegister) {
1616       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1617       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1618                  InRegClassID &&
1619              "Unexpected subvector extraction");
1620       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1621       SDNode *NewNode =
1622           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1623       ReplaceNode(Node, NewNode);
1624       return;
1625     }
1626 
1627     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1628     ReplaceNode(Node, Extract.getNode());
1629     return;
1630   }
1631   case ISD::SPLAT_VECTOR:
1632   case RISCVISD::VMV_S_X_VL:
1633   case RISCVISD::VFMV_S_F_VL:
1634   case RISCVISD::VMV_V_X_VL:
1635   case RISCVISD::VFMV_V_F_VL: {
1636     // Try to match splat of a scalar load to a strided load with stride of x0.
1637     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1638                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1639     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1640     if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1641       break;
1642     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1643     auto *Ld = dyn_cast<LoadSDNode>(Src);
1644     if (!Ld)
1645       break;
1646     EVT MemVT = Ld->getMemoryVT();
1647     // The memory VT should be the same size as the element type.
1648     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1649       break;
1650     if (!IsProfitableToFold(Src, Node, Node) ||
1651         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1652       break;
1653 
1654     SDValue VL;
1655     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1656       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1657     else if (IsScalarMove) {
1658       // We could deal with more VL if we update the VSETVLI insert pass to
1659       // avoid introducing more VSETVLI.
1660       if (!isOneConstant(Node->getOperand(2)))
1661         break;
1662       selectVLOp(Node->getOperand(2), VL);
1663     } else
1664       selectVLOp(Node->getOperand(2), VL);
1665 
1666     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1667     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1668 
1669     SDValue Operands[] = {Ld->getBasePtr(),
1670                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1671                           Ld->getChain()};
1672 
1673     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1674     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1675         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1676         Log2SEW, static_cast<unsigned>(LMUL));
1677     MachineSDNode *Load =
1678         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1679 
1680     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1681 
1682     ReplaceNode(Node, Load);
1683     return;
1684   }
1685   }
1686 
1687   // Select the default instruction.
1688   SelectCode(Node);
1689 }
1690 
1691 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1692     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1693   switch (ConstraintID) {
1694   case InlineAsm::Constraint_m:
1695     // We just support simple memory operands that have a single address
1696     // operand and need no special handling.
1697     OutOps.push_back(Op);
1698     return false;
1699   case InlineAsm::Constraint_A:
1700     OutOps.push_back(Op);
1701     return false;
1702   default:
1703     break;
1704   }
1705 
1706   return true;
1707 }
1708 
1709 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1710   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1711     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1712     return true;
1713   }
1714   return false;
1715 }
1716 
1717 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1718   // If this is FrameIndex, select it directly. Otherwise just let it get
1719   // selected to a register independently.
1720   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1721     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1722   else
1723     Base = Addr;
1724   return true;
1725 }
1726 
1727 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1728                                         SDValue &ShAmt) {
1729   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1730   // amount. If there is an AND on the shift amount, we can bypass it if it
1731   // doesn't affect any of those bits.
1732   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1733     const APInt &AndMask = N->getConstantOperandAPInt(1);
1734 
1735     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1736     // mask that covers the bits needed to represent all shift amounts.
1737     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1738     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1739 
1740     if (ShMask.isSubsetOf(AndMask)) {
1741       ShAmt = N.getOperand(0);
1742       return true;
1743     }
1744 
1745     // SimplifyDemandedBits may have optimized the mask so try restoring any
1746     // bits that are known zero.
1747     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1748     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1749       ShAmt = N.getOperand(0);
1750       return true;
1751     }
1752   } else if (N.getOpcode() == ISD::SUB &&
1753              isa<ConstantSDNode>(N.getOperand(0))) {
1754     uint64_t Imm = N.getConstantOperandVal(0);
1755     // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
1756     // generate a NEG instead of a SUB of a constant.
1757     if (Imm != 0 && Imm % ShiftWidth == 0) {
1758       SDLoc DL(N);
1759       EVT VT = N.getValueType();
1760       SDValue Zero =
1761           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
1762       unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
1763       MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
1764                                                   N.getOperand(1));
1765       ShAmt = SDValue(Neg, 0);
1766       return true;
1767     }
1768   }
1769 
1770   ShAmt = N;
1771   return true;
1772 }
1773 
1774 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1775   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1776       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1777     Val = N.getOperand(0);
1778     return true;
1779   }
1780   MVT VT = N.getSimpleValueType();
1781   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1782     Val = N;
1783     return true;
1784   }
1785 
1786   return false;
1787 }
1788 
1789 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1790   if (N.getOpcode() == ISD::AND) {
1791     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1792     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1793       Val = N.getOperand(0);
1794       return true;
1795     }
1796   }
1797   MVT VT = N.getSimpleValueType();
1798   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1799   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1800     Val = N;
1801     return true;
1802   }
1803 
1804   return false;
1805 }
1806 
1807 // Return true if all users of this SDNode* only consume the lower \p Bits.
1808 // This can be used to form W instructions for add/sub/mul/shl even when the
1809 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1810 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1811 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
1812 // the add/sub/mul/shl to become non-W instructions. By checking the users we
1813 // may be able to use a W instruction and CSE with the other instruction if
1814 // this has happened. We could try to detect that the CSE opportunity exists
1815 // before doing this, but that would be more complicated.
1816 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1817 // opportunities.
1818 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1819   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1820           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1821           Node->getOpcode() == ISD::SRL ||
1822           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1823           isa<ConstantSDNode>(Node)) &&
1824          "Unexpected opcode");
1825 
1826   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1827     SDNode *User = *UI;
1828     // Users of this node should have already been instruction selected
1829     if (!User->isMachineOpcode())
1830       return false;
1831 
1832     // TODO: Add more opcodes?
1833     switch (User->getMachineOpcode()) {
1834     default:
1835       return false;
1836     case RISCV::ADDW:
1837     case RISCV::ADDIW:
1838     case RISCV::SUBW:
1839     case RISCV::MULW:
1840     case RISCV::SLLW:
1841     case RISCV::SLLIW:
1842     case RISCV::SRAW:
1843     case RISCV::SRAIW:
1844     case RISCV::SRLW:
1845     case RISCV::SRLIW:
1846     case RISCV::DIVW:
1847     case RISCV::DIVUW:
1848     case RISCV::REMW:
1849     case RISCV::REMUW:
1850     case RISCV::ROLW:
1851     case RISCV::RORW:
1852     case RISCV::RORIW:
1853     case RISCV::CLZW:
1854     case RISCV::CTZW:
1855     case RISCV::CPOPW:
1856     case RISCV::SLLI_UW:
1857     case RISCV::FMV_W_X:
1858     case RISCV::FCVT_H_W:
1859     case RISCV::FCVT_H_WU:
1860     case RISCV::FCVT_S_W:
1861     case RISCV::FCVT_S_WU:
1862     case RISCV::FCVT_D_W:
1863     case RISCV::FCVT_D_WU:
1864       if (Bits < 32)
1865         return false;
1866       break;
1867     case RISCV::SLLI:
1868       // SLLI only uses the lower (XLen - ShAmt) bits.
1869       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1870         return false;
1871       break;
1872     case RISCV::ANDI:
1873       if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
1874         return false;
1875       break;
1876     case RISCV::SEXT_B:
1877       if (Bits < 8)
1878         return false;
1879       break;
1880     case RISCV::SEXT_H:
1881     case RISCV::FMV_H_X:
1882     case RISCV::ZEXT_H_RV32:
1883     case RISCV::ZEXT_H_RV64:
1884       if (Bits < 16)
1885         return false;
1886       break;
1887     case RISCV::ADD_UW:
1888     case RISCV::SH1ADD_UW:
1889     case RISCV::SH2ADD_UW:
1890     case RISCV::SH3ADD_UW:
1891       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1892       // 32 bits.
1893       if (UI.getOperandNo() != 0 || Bits < 32)
1894         return false;
1895       break;
1896     case RISCV::SB:
1897       if (UI.getOperandNo() != 0 || Bits < 8)
1898         return false;
1899       break;
1900     case RISCV::SH:
1901       if (UI.getOperandNo() != 0 || Bits < 16)
1902         return false;
1903       break;
1904     case RISCV::SW:
1905       if (UI.getOperandNo() != 0 || Bits < 32)
1906         return false;
1907       break;
1908     }
1909   }
1910 
1911   return true;
1912 }
1913 
1914 // Select VL as a 5 bit immediate or a value that will become a register. This
1915 // allows us to choose betwen VSETIVLI or VSETVLI later.
1916 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1917   auto *C = dyn_cast<ConstantSDNode>(N);
1918   if (C && isUInt<5>(C->getZExtValue())) {
1919     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1920                                    N->getValueType(0));
1921   } else if (C && C->isAllOnesValue()) {
1922     // Treat all ones as VLMax.
1923     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1924                                    N->getValueType(0));
1925   } else if (isa<RegisterSDNode>(N) &&
1926              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
1927     // All our VL operands use an operand that allows GPRNoX0 or an immediate
1928     // as the register class. Convert X0 to a special immediate to pass the
1929     // MachineVerifier. This is recognized specially by the vsetvli insertion
1930     // pass.
1931     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1932                                    N->getValueType(0));
1933   } else {
1934     VL = N;
1935   }
1936 
1937   return true;
1938 }
1939 
1940 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1941   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
1942     return false;
1943   SplatVal = N.getOperand(1);
1944   return true;
1945 }
1946 
1947 using ValidateFn = bool (*)(int64_t);
1948 
1949 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1950                                    SelectionDAG &DAG,
1951                                    const RISCVSubtarget &Subtarget,
1952                                    ValidateFn ValidateImm) {
1953   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
1954       !isa<ConstantSDNode>(N.getOperand(1)))
1955     return false;
1956 
1957   int64_t SplatImm =
1958       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
1959 
1960   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
1961   // type is wider than the resulting vector element type: an implicit
1962   // truncation first takes place. Therefore, perform a manual
1963   // truncation/sign-extension in order to ignore any truncated bits and catch
1964   // any zero-extended immediate.
1965   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1966   // sign-extending to (XLenVT -1).
1967   MVT XLenVT = Subtarget.getXLenVT();
1968   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
1969          "Unexpected splat operand type");
1970   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1971   if (EltVT.bitsLT(XLenVT))
1972     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1973 
1974   if (!ValidateImm(SplatImm))
1975     return false;
1976 
1977   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1978   return true;
1979 }
1980 
1981 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1982   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1983                                 [](int64_t Imm) { return isInt<5>(Imm); });
1984 }
1985 
1986 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1987   return selectVSplatSimmHelper(
1988       N, SplatVal, *CurDAG, *Subtarget,
1989       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1990 }
1991 
1992 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1993                                                       SDValue &SplatVal) {
1994   return selectVSplatSimmHelper(
1995       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1996         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1997       });
1998 }
1999 
2000 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2001   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2002       !isa<ConstantSDNode>(N.getOperand(1)))
2003     return false;
2004 
2005   int64_t SplatImm =
2006       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2007 
2008   if (!isUInt<5>(SplatImm))
2009     return false;
2010 
2011   SplatVal =
2012       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2013 
2014   return true;
2015 }
2016 
2017 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2018                                        SDValue &Imm) {
2019   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2020     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2021 
2022     if (!isInt<5>(ImmVal))
2023       return false;
2024 
2025     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2026     return true;
2027   }
2028 
2029   return false;
2030 }
2031 
2032 // Merge an ADDI into the offset of a load/store instruction where possible.
2033 // (load (addi base, off1), off2) -> (load base, off1+off2)
2034 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
2035 // This is possible when off1+off2 fits a 12-bit immediate.
2036 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
2037   int OffsetOpIdx;
2038   int BaseOpIdx;
2039 
2040   // Only attempt this optimisation for I-type loads and S-type stores.
2041   switch (N->getMachineOpcode()) {
2042   default:
2043     return false;
2044   case RISCV::LB:
2045   case RISCV::LH:
2046   case RISCV::LW:
2047   case RISCV::LBU:
2048   case RISCV::LHU:
2049   case RISCV::LWU:
2050   case RISCV::LD:
2051   case RISCV::FLH:
2052   case RISCV::FLW:
2053   case RISCV::FLD:
2054     BaseOpIdx = 0;
2055     OffsetOpIdx = 1;
2056     break;
2057   case RISCV::SB:
2058   case RISCV::SH:
2059   case RISCV::SW:
2060   case RISCV::SD:
2061   case RISCV::FSH:
2062   case RISCV::FSW:
2063   case RISCV::FSD:
2064     BaseOpIdx = 1;
2065     OffsetOpIdx = 2;
2066     break;
2067   }
2068 
2069   if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
2070     return false;
2071 
2072   SDValue Base = N->getOperand(BaseOpIdx);
2073 
2074   // If the base is an ADDI, we can merge it in to the load/store.
2075   if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
2076     return false;
2077 
2078   SDValue ImmOperand = Base.getOperand(1);
2079   uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
2080 
2081   if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
2082     int64_t Offset1 = Const->getSExtValue();
2083     int64_t CombinedOffset = Offset1 + Offset2;
2084     if (!isInt<12>(CombinedOffset))
2085       return false;
2086     ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
2087                                            ImmOperand.getValueType());
2088   } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
2089     // If the off1 in (addi base, off1) is a global variable's address (its
2090     // low part, really), then we can rely on the alignment of that variable
2091     // to provide a margin of safety before off1 can overflow the 12 bits.
2092     // Check if off2 falls within that margin; if so off1+off2 can't overflow.
2093     const DataLayout &DL = CurDAG->getDataLayout();
2094     Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
2095     if (Offset2 != 0 && Alignment <= Offset2)
2096       return false;
2097     int64_t Offset1 = GA->getOffset();
2098     int64_t CombinedOffset = Offset1 + Offset2;
2099     ImmOperand = CurDAG->getTargetGlobalAddress(
2100         GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
2101         CombinedOffset, GA->getTargetFlags());
2102   } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
2103     // Ditto.
2104     Align Alignment = CP->getAlign();
2105     if (Offset2 != 0 && Alignment <= Offset2)
2106       return false;
2107     int64_t Offset1 = CP->getOffset();
2108     int64_t CombinedOffset = Offset1 + Offset2;
2109     ImmOperand = CurDAG->getTargetConstantPool(
2110         CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
2111         CombinedOffset, CP->getTargetFlags());
2112   } else {
2113     return false;
2114   }
2115 
2116   LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
2117   LLVM_DEBUG(Base->dump(CurDAG));
2118   LLVM_DEBUG(dbgs() << "\nN: ");
2119   LLVM_DEBUG(N->dump(CurDAG));
2120   LLVM_DEBUG(dbgs() << "\n");
2121 
2122   // Modify the offset operand of the load/store.
2123   if (BaseOpIdx == 0) // Load
2124     CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
2125                                N->getOperand(2));
2126   else // Store
2127     CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
2128                                ImmOperand, N->getOperand(3));
2129 
2130   return true;
2131 }
2132 
2133 // Try to remove sext.w if the input is a W instruction or can be made into
2134 // a W instruction cheaply.
2135 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2136   // Look for the sext.w pattern, addiw rd, rs1, 0.
2137   if (N->getMachineOpcode() != RISCV::ADDIW ||
2138       !isNullConstant(N->getOperand(1)))
2139     return false;
2140 
2141   SDValue N0 = N->getOperand(0);
2142   if (!N0.isMachineOpcode())
2143     return false;
2144 
2145   switch (N0.getMachineOpcode()) {
2146   default:
2147     break;
2148   case RISCV::ADD:
2149   case RISCV::ADDI:
2150   case RISCV::SUB:
2151   case RISCV::MUL:
2152   case RISCV::SLLI: {
2153     // Convert sext.w+add/sub/mul to their W instructions. This will create
2154     // a new independent instruction. This improves latency.
2155     unsigned Opc;
2156     switch (N0.getMachineOpcode()) {
2157     default:
2158       llvm_unreachable("Unexpected opcode!");
2159     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2160     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2161     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2162     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2163     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2164     }
2165 
2166     SDValue N00 = N0.getOperand(0);
2167     SDValue N01 = N0.getOperand(1);
2168 
2169     // Shift amount needs to be uimm5.
2170     if (N0.getMachineOpcode() == RISCV::SLLI &&
2171         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2172       break;
2173 
2174     SDNode *Result =
2175         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2176                                N00, N01);
2177     ReplaceUses(N, Result);
2178     return true;
2179   }
2180   case RISCV::ADDW:
2181   case RISCV::ADDIW:
2182   case RISCV::SUBW:
2183   case RISCV::MULW:
2184   case RISCV::SLLIW:
2185     // Result is already sign extended just remove the sext.w.
2186     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2187     ReplaceUses(N, N0.getNode());
2188     return true;
2189   }
2190 
2191   return false;
2192 }
2193 
2194 // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2195 // corresponding "unmasked" pseudo versions. The mask we're interested in will
2196 // take the form of a V0 physical register operand, with a glued
2197 // register-setting instruction.
2198 bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2199   const RISCV::RISCVMaskedPseudoInfo *I =
2200       RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2201   if (!I)
2202     return false;
2203 
2204   unsigned MaskOpIdx = I->MaskOpIdx;
2205 
2206   // Check that we're using V0 as a mask register.
2207   if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
2208       cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
2209     return false;
2210 
2211   // The glued user defines V0.
2212   const auto *Glued = N->getGluedNode();
2213 
2214   if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
2215     return false;
2216 
2217   // Check that we're defining V0 as a mask register.
2218   if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
2219       cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
2220     return false;
2221 
2222   // Check the instruction defining V0; it needs to be a VMSET pseudo.
2223   SDValue MaskSetter = Glued->getOperand(2);
2224 
2225   const auto IsVMSet = [](unsigned Opc) {
2226     return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
2227            Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
2228            Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
2229            Opc == RISCV::PseudoVMSET_M_B8;
2230   };
2231 
2232   // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
2233   // undefined behaviour if it's the wrong bitwidth, so we could choose to
2234   // assume that it's all-ones? Same applies to its VL.
2235   if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
2236     return false;
2237 
2238   // Retrieve the tail policy operand index, if any.
2239   Optional<unsigned> TailPolicyOpIdx;
2240   const RISCVInstrInfo *TII = static_cast<const RISCVInstrInfo *>(
2241       CurDAG->getSubtarget().getInstrInfo());
2242 
2243   const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());
2244 
2245   if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2246     // The last operand of the pseudo is the policy op, but we're expecting a
2247     // Glue operand last. We may also have a chain.
2248     TailPolicyOpIdx = N->getNumOperands() - 1;
2249     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
2250       (*TailPolicyOpIdx)--;
2251     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
2252       (*TailPolicyOpIdx)--;
2253 
2254     // If the policy isn't TAIL_AGNOSTIC we can't perform this optimization.
2255     if (N->getConstantOperandVal(*TailPolicyOpIdx) != RISCVII::TAIL_AGNOSTIC)
2256       return false;
2257   }
2258 
2259   const MCInstrDesc &UnmaskedMCID = TII->get(I->UnmaskedPseudo);
2260 
2261   // Check that we're dropping the merge operand, the mask operand, and any
2262   // policy operand when we transform to this unmasked pseudo.
2263   assert(!RISCVII::hasMergeOp(UnmaskedMCID.TSFlags) &&
2264          RISCVII::hasDummyMaskOp(UnmaskedMCID.TSFlags) &&
2265          !RISCVII::hasVecPolicyOp(UnmaskedMCID.TSFlags) &&
2266          "Unexpected pseudo to transform to");
2267   (void)UnmaskedMCID;
2268 
2269   SmallVector<SDValue, 8> Ops;
2270   // Skip the merge operand at index 0.
2271   for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
2272     // Skip the mask, the policy, and the Glue.
2273     SDValue Op = N->getOperand(I);
2274     if (I == MaskOpIdx || I == TailPolicyOpIdx ||
2275         Op.getValueType() == MVT::Glue)
2276       continue;
2277     Ops.push_back(Op);
2278   }
2279 
2280   // Transitively apply any node glued to our new node.
2281   if (auto *TGlued = Glued->getGluedNode())
2282     Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
2283 
2284   SDNode *Result =
2285       CurDAG->getMachineNode(I->UnmaskedPseudo, SDLoc(N), N->getVTList(), Ops);
2286   ReplaceUses(N, Result);
2287 
2288   return true;
2289 }
2290 
2291 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2292 // for instruction scheduling.
2293 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
2294   return new RISCVDAGToDAGISel(TM);
2295 }
2296