1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #define GET_RISCVMaskedPseudosTable_IMPL
41 #include "RISCVGenSearchableTables.inc"
42 } // namespace RISCV
43 } // namespace llvm
44 
// Preprocess the DAG before instruction selection: canonicalize generic
// SPLAT_VECTOR nodes to RISCV-specific splat nodes, and expand
// SPLAT_VECTOR_SPLIT_I64_VL (an i64 splat built from two i32 halves) into a
// two-store stack spill followed by a stride-0 vector load.
void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
    // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
    if (N->getOpcode() == ISD::SPLAT_VECTOR) {
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      // X0 as the VL operand requests VLMAX.
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                                       N->getOperand(0), VL);

      // Back the iterator up so the RAUW below cannot invalidate it, then
      // restore it before deleting N. See the longer explanation below for
      // the SPLAT_VECTOR_SPLIT_I64_VL case.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Passthru = N->getOperand(0);
    SDValue Lo = N->getOperand(1);
    SDValue Hi = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    // Store the low half at offset 0 and the high half at offset 4; both
    // stores hang off the entry node and are joined by a TokenFactor.
    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    // Re-load the i64 as a vector via vlse with stride X0 (stride 0), which
    // splats the single memory element across the vector.
    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain,
                     IntID,
                     Passthru,
                     StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64),
                     VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
133 
134 void RISCVDAGToDAGISel::PostprocessISelDAG() {
135   HandleSDNode Dummy(CurDAG->getRoot());
136   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
137 
138   bool MadeChange = false;
139   while (Position != CurDAG->allnodes_begin()) {
140     SDNode *N = &*--Position;
141     // Skip dead nodes and any non-machine opcodes.
142     if (N->use_empty() || !N->isMachineOpcode())
143       continue;
144 
145     MadeChange |= doPeepholeSExtW(N);
146     MadeChange |= doPeepholeLoadStoreADDI(N);
147     MadeChange |= doPeepholeMaskedRVV(N);
148   }
149 
150   CurDAG->setRoot(Dummy.getValue());
151 
152   if (MadeChange)
153     CurDAG->RemoveDeadNodes();
154 }
155 
156 static SDNode *selectImmWithConstantPool(SelectionDAG *CurDAG, const SDLoc &DL,
157                                          const MVT VT, int64_t Imm,
158                                          const RISCVSubtarget &Subtarget) {
159   assert(VT == MVT::i64 && "Expecting MVT::i64");
160   const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
161   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(CurDAG->getConstantPool(
162       ConstantInt::get(EVT(VT).getTypeForEVT(*CurDAG->getContext()), Imm), VT));
163   SDValue Addr = TLI->getAddr(CP, *CurDAG);
164   SDValue Offset = CurDAG->getTargetConstant(0, DL, VT);
165   // Since there is no data race, the chain can be the entry node.
166   SDNode *Load = CurDAG->getMachineNode(RISCV::LD, DL, VT, Addr, Offset,
167                                         CurDAG->getEntryNode());
168   MachineFunction &MF = CurDAG->getMachineFunction();
169   MachineMemOperand *MemOp = MF.getMachineMemOperand(
170       MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
171       LLT(VT), CP->getAlign());
172   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Load), {MemOp});
173   return Load;
174 }
175 
176 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
177                          int64_t Imm, const RISCVSubtarget &Subtarget) {
178   MVT XLenVT = Subtarget.getXLenVT();
179   RISCVMatInt::InstSeq Seq =
180       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
181 
182   // If Imm is expensive to build, then we put it into constant pool.
183   if (Subtarget.useConstantPoolForLargeInts() &&
184       Seq.size() > Subtarget.getMaxBuildIntsCost())
185     return selectImmWithConstantPool(CurDAG, DL, VT, Imm, Subtarget);
186 
187   SDNode *Result = nullptr;
188   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
189   for (RISCVMatInt::Inst &Inst : Seq) {
190     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
191     if (Inst.Opc == RISCV::LUI)
192       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
193     else if (Inst.Opc == RISCV::ADD_UW)
194       Result = CurDAG->getMachineNode(RISCV::ADD_UW, DL, XLenVT, SrcReg,
195                                       CurDAG->getRegister(RISCV::X0, XLenVT));
196     else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
197              Inst.Opc == RISCV::SH3ADD)
198       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
199     else
200       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
201 
202     // Only the first instruction has X0 as its source.
203     SrcReg = SDValue(Result, 0);
204   }
205 
206   return Result;
207 }
208 
209 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
210                                unsigned RegClassID, unsigned SubReg0) {
211   assert(Regs.size() >= 2 && Regs.size() <= 8);
212 
213   SDLoc DL(Regs[0]);
214   SmallVector<SDValue, 8> Ops;
215 
216   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
217 
218   for (unsigned I = 0; I < Regs.size(); ++I) {
219     Ops.push_back(Regs[I]);
220     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
221   }
222   SDNode *N =
223       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
224   return SDValue(N, 0);
225 }
226 
227 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
228                              unsigned NF) {
229   static const unsigned RegClassIDs[] = {
230       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
231       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
232       RISCV::VRN8M1RegClassID};
233 
234   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
235 }
236 
237 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
238                              unsigned NF) {
239   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
240                                          RISCV::VRN3M2RegClassID,
241                                          RISCV::VRN4M2RegClassID};
242 
243   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
244 }
245 
246 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
247                              unsigned NF) {
248   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
249                          RISCV::sub_vrm4_0);
250 }
251 
252 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
253                            unsigned NF, RISCVII::VLMUL LMUL) {
254   switch (LMUL) {
255   default:
256     llvm_unreachable("Invalid LMUL.");
257   case RISCVII::VLMUL::LMUL_F8:
258   case RISCVII::VLMUL::LMUL_F4:
259   case RISCVII::VLMUL::LMUL_F2:
260   case RISCVII::VLMUL::LMUL_1:
261     return createM1Tuple(CurDAG, Regs, NF);
262   case RISCVII::VLMUL::LMUL_2:
263     return createM2Tuple(CurDAG, Regs, NF);
264   case RISCVII::VLMUL::LMUL_4:
265     return createM4Tuple(CurDAG, Regs, NF);
266   }
267 }
268 
// Append the trailing operands shared by all vector load/store pseudos to
// Operands, consuming Node's source operands starting at index CurOp.
// The push order is: base pointer, [stride/index], [mask register V0],
// VL, SEW, [policy (masked loads only)], chain, [glue].
// If IndexVT is non-null, it receives the type of the stride/index operand.
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Stride or index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0. The glue from the copy ties it to the
    // instruction that reads V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  // SEW is encoded as its log2.
  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
313 
// Select a (possibly masked/strided) vlseg<nf> segment-load intrinsic into
// its pseudo instruction. The pseudo produces a single MVT::Untyped
// super-register; each of Node's NF results is replaced with an extract of
// the corresponding subregister.
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; source operands
  // start at 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // Combine the NF merge (masked-off) values into one tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Transfer the memory operand info from the intrinsic node, if any.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF individual vector values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
354 
// Select a fault-only-first segment load (vlseg<nf>ff) intrinsic. In
// addition to the NF segment values and chain, the intrinsic returns the VL
// actually processed; that value is recovered with a PseudoReadVL glued to
// the load.
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; source operands
  // start at 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    // Combine the NF merge (masked-off) values into one tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  // The pseudo emits a glue result (value 2) that ties the ReadVL to it.
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  // Transfer the memory operand info from the intrinsic node, if any.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF individual vector values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}
399 
// Select an indexed segment load (vloxseg<nf>/vluxseg<nf>) intrinsic into
// its pseudo instruction. IsOrdered selects ordered vs. unordered indexing.
// The index vector's EEW and LMUL are derived from the index operand's type.
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic ID; source operands
  // start at 2.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    // Combine the NF merge (masked-off) values into one tuple operand.
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  // Log2(64) == 6: 64-bit index elements are illegal on RV32.
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Transfer the memory operand info from the intrinsic node, if any.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result back into NF individual vector values.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
451 
// Select a (possibly masked/strided) vsseg<nf> segment-store intrinsic into
// its pseudo instruction. The NF store values are combined into a single
// tuple register operand.
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Operands: chain, intrinsic ID, NF store values, base, [stride], [mask],
  // VL. Subtract the four always-present operands, then the optional ones.
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  // Store values start at operand 2.
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Transfer the memory operand info from the intrinsic node, if any.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
483 
// Select an indexed segment store (vsoxseg<nf>/vsuxseg<nf>) intrinsic into
// its pseudo instruction. IsOrdered selects ordered vs. unordered indexing.
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Operands: chain, intrinsic ID, NF store values, base, index, [mask], VL.
  // Subtract the five always-present operands, then the optional mask.
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  // Store values start at operand 2.
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  // Log2(64) == 6: 64-bit index elements are illegal on RV32.
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Transfer the memory operand info from the intrinsic node, if any.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
525 
// Select the riscv_vsetvli{,max}{,_opt} intrinsics into PseudoVSETVLI,
// PseudoVSETVLIX0 (for the vlmax variants) or PseudoVSETIVLI (when the AVL
// is a constant fitting in uimm5). SEW and LMUL are decoded from the
// intrinsic's constant operands and re-encoded as a single VTYPE immediate
// with tail-agnostic set and mask-agnostic clear.
void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  // Nothing to do without the V extension.
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  // With a chain, the intrinsic ID sits at operand 1 instead of 0.
  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  // The vlmax variants have no AVL operand, so SEW/LMUL sit one slot closer.
  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  // Only the low 3 bits of the SEW/LMUL operands are meaningful.
  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    // X0 as the AVL selects VLMAX; PseudoVSETVLIX0 encodes that form.
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      // An AVL that fits in 5 bits can use the immediate form (vsetivli).
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
595 
596 void RISCVDAGToDAGISel::Select(SDNode *Node) {
597   // If we have a custom node, we have already selected.
598   if (Node->isMachineOpcode()) {
599     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
600     Node->setNodeId(-1);
601     return;
602   }
603 
604   // Instruction Selection not handled by the auto-generated tablegen selection
605   // should be handled here.
606   unsigned Opcode = Node->getOpcode();
607   MVT XLenVT = Subtarget->getXLenVT();
608   SDLoc DL(Node);
609   MVT VT = Node->getSimpleValueType(0);
610 
611   switch (Opcode) {
612   case ISD::Constant: {
613     auto *ConstNode = cast<ConstantSDNode>(Node);
614     if (VT == XLenVT && ConstNode->isZero()) {
615       SDValue New =
616           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
617       ReplaceNode(Node, New.getNode());
618       return;
619     }
620     int64_t Imm = ConstNode->getSExtValue();
621     // If the upper XLen-16 bits are not used, try to convert this to a simm12
622     // by sign extending bit 15.
623     if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
624         hasAllHUsers(Node))
625       Imm = SignExtend64(Imm, 16);
626     // If the upper 32-bits are not used try to convert this into a simm32 by
627     // sign extending bit 32.
628     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
629       Imm = SignExtend64(Imm, 32);
630 
631     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
632     return;
633   }
634   case ISD::FrameIndex: {
635     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
636     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
637     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
638     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
639     return;
640   }
641   case ISD::SRL: {
642     // Optimize (srl (and X, C2), C) ->
643     //          (srli (slli X, (XLen-C3), (XLen-C3) + C)
644     // Where C2 is a mask with C3 trailing ones.
645     // Taking into account that the C2 may have had lower bits unset by
646     // SimplifyDemandedBits. This avoids materializing the C2 immediate.
647     // This pattern occurs when type legalizing right shifts for types with
648     // less than XLen bits.
649     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
650     if (!N1C)
651       break;
652     SDValue N0 = Node->getOperand(0);
653     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
654         !isa<ConstantSDNode>(N0.getOperand(1)))
655       break;
656     unsigned ShAmt = N1C->getZExtValue();
657     uint64_t Mask = N0.getConstantOperandVal(1);
658     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
659     if (!isMask_64(Mask))
660       break;
661     unsigned TrailingOnes = countTrailingOnes(Mask);
662     // 32 trailing ones should use srliw via tablegen pattern.
663     if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
664       break;
665     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
666     SDNode *SLLI =
667         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
668                                CurDAG->getTargetConstant(LShAmt, DL, VT));
669     SDNode *SRLI = CurDAG->getMachineNode(
670         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
671         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
672     ReplaceNode(Node, SRLI);
673     return;
674   }
675   case ISD::SRA: {
676     // Optimize (sra (sext_inreg X, i16), C) ->
677     //          (srai (slli X, (XLen-16), (XLen-16) + C)
678     // And      (sra (sext_inreg X, i8), C) ->
679     //          (srai (slli X, (XLen-8), (XLen-8) + C)
680     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
681     // This transform matches the code we get without Zbb. The shifts are more
682     // compressible, and this can help expose CSE opportunities in the sdiv by
683     // constant optimization.
684     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
685     if (!N1C)
686       break;
687     SDValue N0 = Node->getOperand(0);
688     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
689       break;
690     unsigned ShAmt = N1C->getZExtValue();
691     unsigned ExtSize =
692         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
693     // ExtSize of 32 should use sraiw via tablegen pattern.
694     if (ExtSize >= 32 || ShAmt >= ExtSize)
695       break;
696     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
697     SDNode *SLLI =
698         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
699                                CurDAG->getTargetConstant(LShAmt, DL, VT));
700     SDNode *SRAI = CurDAG->getMachineNode(
701         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
702         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
703     ReplaceNode(Node, SRAI);
704     return;
705   }
706   case ISD::AND: {
707     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
708     if (!N1C)
709       break;
710 
711     SDValue N0 = Node->getOperand(0);
712 
713     bool LeftShift = N0.getOpcode() == ISD::SHL;
714     if (!LeftShift && N0.getOpcode() != ISD::SRL)
715       break;
716 
717     auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
718     if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    // NOTE(review): this fragment continues the ISD::AND handling started
    // above: N0 is a shift of X by constant C2, N1C is the AND mask constant,
    // and LeftShift distinguishes SHL from SRL (inferred from uses below).
    // A shift amount of 0 or >= XLen matches none of the patterns below.
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is an andi.
    bool IsANDI = isInt<12>(N1C->getSExtValue());

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32)
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      // C3 = number of leading zero bits of C1, counted in XLen bits.
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, X.getOperand(0),
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        // Skip it in order to select sraiw.
        bool Skip = Subtarget->hasStdExtZba() && C3 == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        if (OneUseOrZExtW && !IsANDI && !Skip) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLI_UW =
              CurDAG->getMachineNode(RISCV::SLLI_UW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLI_UW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !IsANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
    if (!LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
          OneUseOrZExtW && !IsANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    break;
  }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.

    // RHS should be a constant. Require it to have a single use so the
    // materialized (shifted) constant doesn't pessimize other users.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // This should be the only use of the AND unless we will use
    // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
    // constants.
    if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
    // optimization.
    if (isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    // Bail out if shifting C1 by ConstantShift would lose set bits (i.e. the
    // shift exceeds C1's own leading-zero count in XLen bits).
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this RV32, we need to sign extend the constant.
    if (XLen == 32)
      ShiftedC1 = SignExtend64(ShiftedC1, 32);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      // There is no vmsge{u}.vx instruction; expand to vmslt{u}.vx + vmnand
      // (or vmset for the always-true unsigned >= 0 case).
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          // vmsgeu.vx with 0 is always true; remember to emit vmset below.
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset.
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
// NOTE(review): the suffix_b parameter is unused in this macro; kept for
// symmetry with CASE_VMSLT_VMNAND_VMSET_OPCODES above.
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
        CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      SDValue Policy = Node->getOperand(6);
      SDValue Cmp = SDValue(CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                                   {MaskedOff, Src1, Src2, V0,
                                                    VL, SEW, Policy, Glue}),
                            0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      // Indexed loads: vloxei = ordered, vluxei = unordered.
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      // NOTE(review): the "(!IsMasked && ...)" guard is redundant given the
      // leading "IsMasked ||"; kept byte-identical here.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/true, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU =
          HasPassthruOperand &&
          ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands, /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 7> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands,
                                 /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
      // Fault-only-first loads produce (value, chain, glue); the new VL after
      // a trap is read back via PseudoReadVL consuming the glue result.
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
                                 MVT::Other, MVT::Glue, Operands);
      SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                              /*Glue*/ SDValue(Load, 2));

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      // Indexed stores: vsoxei = ordered, vsuxei = unordered.
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      // SEW/LMUL come from the stored value's type (operand 2).
      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/false, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      // SEW/LMUL come from the stored value's type (operand 2).
      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1516 
1517       ReplaceNode(Node, Store);
1518       return;
1519     }
1520     }
1521     break;
1522   }
1523   case ISD::BITCAST: {
1524     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1525     // Just drop bitcasts between vectors if both are fixed or both are
1526     // scalable.
1527     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1528         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1529       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1530       CurDAG->RemoveDeadNode(Node);
1531       return;
1532     }
1533     break;
1534   }
1535   case ISD::INSERT_SUBVECTOR: {
1536     SDValue V = Node->getOperand(0);
1537     SDValue SubV = Node->getOperand(1);
1538     SDLoc DL(SubV);
1539     auto Idx = Node->getConstantOperandVal(2);
1540     MVT SubVecVT = SubV.getSimpleValueType();
1541 
1542     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1543     MVT SubVecContainerVT = SubVecVT;
1544     // Establish the correct scalable-vector types for any fixed-length type.
1545     if (SubVecVT.isFixedLengthVector())
1546       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1547     if (VT.isFixedLengthVector())
1548       VT = TLI.getContainerForFixedLengthVector(VT);
1549 
1550     const auto *TRI = Subtarget->getRegisterInfo();
1551     unsigned SubRegIdx;
1552     std::tie(SubRegIdx, Idx) =
1553         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1554             VT, SubVecContainerVT, Idx, TRI);
1555 
1556     // If the Idx hasn't been completely eliminated then this is a subvector
1557     // insert which doesn't naturally align to a vector register. These must
1558     // be handled using instructions to manipulate the vector registers.
1559     if (Idx != 0)
1560       break;
1561 
1562     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1563     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1564                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1565                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1566     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1567     assert((!IsSubVecPartReg || V.isUndef()) &&
1568            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1569            "the subvector is smaller than a full-sized register");
1570 
1571     // If we haven't set a SubRegIdx, then we must be going between
1572     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1573     if (SubRegIdx == RISCV::NoSubRegister) {
1574       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1575       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1576                  InRegClassID &&
1577              "Unexpected subvector extraction");
1578       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1579       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1580                                                DL, VT, SubV, RC);
1581       ReplaceNode(Node, NewNode);
1582       return;
1583     }
1584 
1585     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1586     ReplaceNode(Node, Insert.getNode());
1587     return;
1588   }
1589   case ISD::EXTRACT_SUBVECTOR: {
1590     SDValue V = Node->getOperand(0);
1591     auto Idx = Node->getConstantOperandVal(1);
1592     MVT InVT = V.getSimpleValueType();
1593     SDLoc DL(V);
1594 
1595     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1596     MVT SubVecContainerVT = VT;
1597     // Establish the correct scalable-vector types for any fixed-length type.
1598     if (VT.isFixedLengthVector())
1599       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1600     if (InVT.isFixedLengthVector())
1601       InVT = TLI.getContainerForFixedLengthVector(InVT);
1602 
1603     const auto *TRI = Subtarget->getRegisterInfo();
1604     unsigned SubRegIdx;
1605     std::tie(SubRegIdx, Idx) =
1606         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1607             InVT, SubVecContainerVT, Idx, TRI);
1608 
1609     // If the Idx hasn't been completely eliminated then this is a subvector
1610     // extract which doesn't naturally align to a vector register. These must
1611     // be handled using instructions to manipulate the vector registers.
1612     if (Idx != 0)
1613       break;
1614 
1615     // If we haven't set a SubRegIdx, then we must be going between
1616     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1617     if (SubRegIdx == RISCV::NoSubRegister) {
1618       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1619       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1620                  InRegClassID &&
1621              "Unexpected subvector extraction");
1622       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1623       SDNode *NewNode =
1624           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1625       ReplaceNode(Node, NewNode);
1626       return;
1627     }
1628 
1629     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1630     ReplaceNode(Node, Extract.getNode());
1631     return;
1632   }
1633   case ISD::SPLAT_VECTOR:
1634   case RISCVISD::VMV_S_X_VL:
1635   case RISCVISD::VFMV_S_F_VL:
1636   case RISCVISD::VMV_V_X_VL:
1637   case RISCVISD::VFMV_V_F_VL: {
1638     // Try to match splat of a scalar load to a strided load with stride of x0.
1639     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1640                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1641     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1642     if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1643       break;
1644     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1645     auto *Ld = dyn_cast<LoadSDNode>(Src);
1646     if (!Ld)
1647       break;
1648     EVT MemVT = Ld->getMemoryVT();
1649     // The memory VT should be the same size as the element type.
1650     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1651       break;
1652     if (!IsProfitableToFold(Src, Node, Node) ||
1653         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1654       break;
1655 
1656     SDValue VL;
1657     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1658       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1659     else if (IsScalarMove) {
1660       // We could deal with more VL if we update the VSETVLI insert pass to
1661       // avoid introducing more VSETVLI.
1662       if (!isOneConstant(Node->getOperand(2)))
1663         break;
1664       selectVLOp(Node->getOperand(2), VL);
1665     } else
1666       selectVLOp(Node->getOperand(2), VL);
1667 
1668     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1669     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1670 
1671     SDValue Operands[] = {Ld->getBasePtr(),
1672                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1673                           Ld->getChain()};
1674 
1675     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1676     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1677         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1678         Log2SEW, static_cast<unsigned>(LMUL));
1679     MachineSDNode *Load =
1680         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1681 
1682     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1683 
1684     ReplaceNode(Node, Load);
1685     return;
1686   }
1687   }
1688 
1689   // Select the default instruction.
1690   SelectCode(Node);
1691 }
1692 
1693 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1694     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1695   switch (ConstraintID) {
1696   case InlineAsm::Constraint_m:
1697     // We just support simple memory operands that have a single address
1698     // operand and need no special handling.
1699     OutOps.push_back(Op);
1700     return false;
1701   case InlineAsm::Constraint_A:
1702     OutOps.push_back(Op);
1703     return false;
1704   default:
1705     break;
1706   }
1707 
1708   return true;
1709 }
1710 
1711 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1712   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1713     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1714     return true;
1715   }
1716   return false;
1717 }
1718 
1719 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1720   // If this is FrameIndex, select it directly. Otherwise just let it get
1721   // selected to a register independently.
1722   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1723     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1724   else
1725     Base = Addr;
1726   return true;
1727 }
1728 
/// Select the shift-amount operand for a shift that only reads the low
/// log2(\p ShiftWidth) bits of its amount (\p ShiftWidth is a power of two,
/// asserted below). Strips a redundant AND mask and folds (sub C, X) with
/// C % ShiftWidth == 0 into a NEG. Always returns true; \p ShAmt receives
/// the (possibly simplified) amount to use.
bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
                                        SDValue &ShAmt) {
  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
  // amount. If there is an AND on the shift amount, we can bypass it if it
  // doesn't affect any of those bits.
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    const APInt &AndMask = N->getConstantOperandAPInt(1);

    // Since the max shift amount is a power of 2 we can subtract 1 to make a
    // mask that covers the bits needed to represent all shift amounts.
    assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);

    // If the AND keeps every bit the shift reads, the mask is a no-op for
    // the shift and the unmasked value can be used directly.
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmt = N.getOperand(0);
      return true;
    }

    // SimplifyDemandedBits may have optimized the mask so try restoring any
    // bits that are known zero.
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
    if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
      ShAmt = N.getOperand(0);
      return true;
    }
  } else if (N.getOpcode() == ISD::SUB &&
             isa<ConstantSDNode>(N.getOperand(0))) {
    uint64_t Imm = N.getConstantOperandVal(0);
    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
    // generate a NEG instead of a SUB of a constant.
    if (Imm != 0 && Imm % ShiftWidth == 0) {
      SDLoc DL(N);
      EVT VT = N.getValueType();
      // Negate by subtracting from x0. For i64, SUBW is used; it only
      // produces the correct low 32 bits, which covers the (at most 6) bits
      // the shift actually reads.
      SDValue Zero =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
      unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
      MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
                                                  N.getOperand(1));
      ShAmt = SDValue(Neg, 0);
      return true;
    }
  }

  // No simplification applied; use the amount as-is.
  ShAmt = N;
  return true;
}
1775 
1776 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1777   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1778       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1779     Val = N.getOperand(0);
1780     return true;
1781   }
1782   MVT VT = N.getSimpleValueType();
1783   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1784     Val = N;
1785     return true;
1786   }
1787 
1788   return false;
1789 }
1790 
1791 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1792   if (N.getOpcode() == ISD::AND) {
1793     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1794     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1795       Val = N.getOperand(0);
1796       return true;
1797     }
1798   }
1799   MVT VT = N.getSimpleValueType();
1800   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1801   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1802     Val = N;
1803     return true;
1804   }
1805 
1806   return false;
1807 }
1808 
// Return true if all users of this SDNode* only consume the lower \p Bits.
// This can be used to form W instructions for add/sub/mul/shl even when the
// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
// SimplifyDemandedBits has made it so some users see a sext_inreg and some
// don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
// the add/sub/mul/shl to become non-W instructions. By checking the users we
// may be able to use a W instruction and CSE with the other instruction if
// this has happened. We could try to detect that the CSE opportunity exists
// before doing this, but that would be more complicated.
// TODO: Does this need to look through AND/OR/XOR to their users to find more
// opportunities.
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
          Node->getOpcode() == ISD::SRL ||
          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
          isa<ConstantSDNode>(Node)) &&
         "Unexpected opcode");

  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    // Users of this node should have already been instruction selected
    if (!User->isMachineOpcode())
      return false;

    // TODO: Add more opcodes?
    switch (User->getMachineOpcode()) {
    default:
      // Unknown user: conservatively assume it reads all bits.
      return false;
    // W-form ALU instructions and FP conversions from a 32-bit GPR only
    // consume the low 32 bits of their GPR operands.
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::MULW:
    case RISCV::SLLW:
    case RISCV::SLLIW:
    case RISCV::SRAW:
    case RISCV::SRAIW:
    case RISCV::SRLW:
    case RISCV::SRLIW:
    case RISCV::DIVW:
    case RISCV::DIVUW:
    case RISCV::REMW:
    case RISCV::REMUW:
    case RISCV::ROLW:
    case RISCV::RORW:
    case RISCV::RORIW:
    case RISCV::CLZW:
    case RISCV::CTZW:
    case RISCV::CPOPW:
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
      if (Bits < 32)
        return false;
      break;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
        return false;
      break;
    case RISCV::ANDI:
      // ANDI only demands bits up to the mask's most-significant set bit.
      // (A sign-extended negative simm12 has no leading zeros, so this
      // conservatively requires all 64 bits.)
      if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
        return false;
      break;
    case RISCV::SEXT_B:
      // Sign-extend from byte: only the low 8 bits are read.
      if (Bits < 8)
        return false;
      break;
    case RISCV::SEXT_H:
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // Halfword extensions/moves: only the low 16 bits are read.
      if (Bits < 16)
        return false;
      break;
    case RISCV::ADD_UW:
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
      // 32 bits.
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    // Stores only consume the low Bits of their stored value (operand 0).
    case RISCV::SB:
      if (UI.getOperandNo() != 0 || Bits < 8)
        return false;
      break;
    case RISCV::SH:
      if (UI.getOperandNo() != 0 || Bits < 16)
        return false;
      break;
    case RISCV::SW:
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    }
  }

  return true;
}
1915 
1916 // Select VL as a 5 bit immediate or a value that will become a register. This
1917 // allows us to choose betwen VSETIVLI or VSETVLI later.
1918 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1919   auto *C = dyn_cast<ConstantSDNode>(N);
1920   if (C && isUInt<5>(C->getZExtValue())) {
1921     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1922                                    N->getValueType(0));
1923   } else if (C && C->isAllOnesValue()) {
1924     // Treat all ones as VLMax.
1925     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1926                                    N->getValueType(0));
1927   } else if (isa<RegisterSDNode>(N) &&
1928              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
1929     // All our VL operands use an operand that allows GPRNoX0 or an immediate
1930     // as the register class. Convert X0 to a special immediate to pass the
1931     // MachineVerifier. This is recognized specially by the vsetvli insertion
1932     // pass.
1933     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1934                                    N->getValueType(0));
1935   } else {
1936     VL = N;
1937   }
1938 
1939   return true;
1940 }
1941 
1942 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1943   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
1944     return false;
1945   SplatVal = N.getOperand(1);
1946   return true;
1947 }
1948 
1949 using ValidateFn = bool (*)(int64_t);
1950 
1951 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1952                                    SelectionDAG &DAG,
1953                                    const RISCVSubtarget &Subtarget,
1954                                    ValidateFn ValidateImm) {
1955   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
1956       !isa<ConstantSDNode>(N.getOperand(1)))
1957     return false;
1958 
1959   int64_t SplatImm =
1960       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
1961 
1962   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
1963   // type is wider than the resulting vector element type: an implicit
1964   // truncation first takes place. Therefore, perform a manual
1965   // truncation/sign-extension in order to ignore any truncated bits and catch
1966   // any zero-extended immediate.
1967   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1968   // sign-extending to (XLenVT -1).
1969   MVT XLenVT = Subtarget.getXLenVT();
1970   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
1971          "Unexpected splat operand type");
1972   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1973   if (EltVT.bitsLT(XLenVT))
1974     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1975 
1976   if (!ValidateImm(SplatImm))
1977     return false;
1978 
1979   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1980   return true;
1981 }
1982 
1983 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1984   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1985                                 [](int64_t Imm) { return isInt<5>(Imm); });
1986 }
1987 
1988 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1989   return selectVSplatSimmHelper(
1990       N, SplatVal, *CurDAG, *Subtarget,
1991       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1992 }
1993 
1994 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1995                                                       SDValue &SplatVal) {
1996   return selectVSplatSimmHelper(
1997       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1998         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1999       });
2000 }
2001 
2002 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2003   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2004       !isa<ConstantSDNode>(N.getOperand(1)))
2005     return false;
2006 
2007   int64_t SplatImm =
2008       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2009 
2010   if (!isUInt<5>(SplatImm))
2011     return false;
2012 
2013   SplatVal =
2014       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2015 
2016   return true;
2017 }
2018 
2019 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2020                                        SDValue &Imm) {
2021   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2022     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2023 
2024     if (!isInt<5>(ImmVal))
2025       return false;
2026 
2027     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2028     return true;
2029   }
2030 
2031   return false;
2032 }
2033 
// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit immediate.
// Returns true if the node was updated in place.
bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
  int OffsetOpIdx;
  int BaseOpIdx;

  // Only attempt this optimisation for I-type loads and S-type stores.
  switch (N->getMachineOpcode()) {
  default:
    return false;
  // Loads: operands are (base, offset, ...).
  case RISCV::LB:
  case RISCV::LH:
  case RISCV::LW:
  case RISCV::LBU:
  case RISCV::LHU:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLH:
  case RISCV::FLW:
  case RISCV::FLD:
    BaseOpIdx = 0;
    OffsetOpIdx = 1;
    break;
  // Stores: operands are (value, base, offset, ...).
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::SD:
  case RISCV::FSH:
  case RISCV::FSW:
  case RISCV::FSD:
    BaseOpIdx = 1;
    OffsetOpIdx = 2;
    break;
  }

  // The load/store offset must be a constant to fold into.
  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
    return false;

  SDValue Base = N->getOperand(BaseOpIdx);

  // If the base is an ADDI, we can merge it in to the load/store.
  if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
    return false;

  SDValue ImmOperand = Base.getOperand(1);
  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

  // The ADDI's immediate operand can be a plain constant, or the low part
  // of a global/constant-pool address; each case builds the combined
  // replacement operand.
  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
    int64_t Offset1 = Const->getSExtValue();
    int64_t CombinedOffset = Offset1 + Offset2;
    if (!isInt<12>(CombinedOffset))
      return false;
    ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                           ImmOperand.getValueType());
  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
    // If the off1 in (addi base, off1) is a global variable's address (its
    // low part, really), then we can rely on the alignment of that variable
    // to provide a margin of safety before off1 can overflow the 12 bits.
    // Check if off2 falls within that margin; if so off1+off2 can't overflow.
    const DataLayout &DL = CurDAG->getDataLayout();
    Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = GA->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetGlobalAddress(
        GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
        CombinedOffset, GA->getTargetFlags());
  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
    // Ditto, with the constant pool entry's alignment as the safety margin.
    Align Alignment = CP->getAlign();
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = CP->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetConstantPool(
        CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
        CombinedOffset, CP->getTargetFlags());
  } else {
    return false;
  }

  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
  LLVM_DEBUG(Base->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\nN: ");
  LLVM_DEBUG(N->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\n");

  // Modify the offset operand of the load/store in place, keeping the
  // trailing operand (operand 2 for loads, 3 for stores) unchanged.
  if (BaseOpIdx == 0) // Load
    CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                               N->getOperand(2));
  else // Store
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                               ImmOperand, N->getOperand(3));

  return true;
}
2134 
2135 // Try to remove sext.w if the input is a W instruction or can be made into
2136 // a W instruction cheaply.
2137 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2138   // Look for the sext.w pattern, addiw rd, rs1, 0.
2139   if (N->getMachineOpcode() != RISCV::ADDIW ||
2140       !isNullConstant(N->getOperand(1)))
2141     return false;
2142 
2143   SDValue N0 = N->getOperand(0);
2144   if (!N0.isMachineOpcode())
2145     return false;
2146 
2147   switch (N0.getMachineOpcode()) {
2148   default:
2149     break;
2150   case RISCV::ADD:
2151   case RISCV::ADDI:
2152   case RISCV::SUB:
2153   case RISCV::MUL:
2154   case RISCV::SLLI: {
2155     // Convert sext.w+add/sub/mul to their W instructions. This will create
2156     // a new independent instruction. This improves latency.
2157     unsigned Opc;
2158     switch (N0.getMachineOpcode()) {
2159     default:
2160       llvm_unreachable("Unexpected opcode!");
2161     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2162     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2163     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2164     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2165     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2166     }
2167 
2168     SDValue N00 = N0.getOperand(0);
2169     SDValue N01 = N0.getOperand(1);
2170 
2171     // Shift amount needs to be uimm5.
2172     if (N0.getMachineOpcode() == RISCV::SLLI &&
2173         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2174       break;
2175 
2176     SDNode *Result =
2177         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2178                                N00, N01);
2179     ReplaceUses(N, Result);
2180     return true;
2181   }
2182   case RISCV::ADDW:
2183   case RISCV::ADDIW:
2184   case RISCV::SUBW:
2185   case RISCV::MULW:
2186   case RISCV::SLLIW:
2187     // Result is already sign extended just remove the sext.w.
2188     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2189     ReplaceUses(N, N0.getNode());
2190     return true;
2191   }
2192 
2193   return false;
2194 }
2195 
// Optimize masked RVV pseudo instructions with a known all-ones mask to their
// corresponding "unmasked" pseudo versions. The mask we're interested in will
// take the form of a V0 physical register operand, with a glued
// register-setting instruction.
// Returns true if the node was replaced with the unmasked pseudo.
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  // Look up the matching unmasked pseudo; bail out if this opcode has none.
  const RISCV::RISCVMaskedPseudoInfo *I =
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  if (!I)
    return false;

  unsigned MaskOpIdx = I->MaskOpIdx;

  // Check that we're using V0 as a mask register.
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
    return false;

  // The glued user defines V0.
  const auto *Glued = N->getGluedNode();

  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
    return false;

  // Check that we're defining V0 as a mask register.
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
    return false;

  // Check the instruction defining V0; it needs to be a VMSET pseudo.
  SDValue MaskSetter = Glued->getOperand(2);

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
  // undefined behaviour if it's the wrong bitwidth, so we could choose to
  // assume that it's all-ones? Same applies to its VL.
  if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
    return false;

  // Retrieve the tail policy operand index, if any.
  Optional<unsigned> TailPolicyOpIdx;
  const RISCVInstrInfo *TII = static_cast<const RISCVInstrInfo *>(
      CurDAG->getSubtarget().getInstrInfo());

  const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());

  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
    // The last operand of the pseudo is the policy op, but we're expecting a
    // Glue operand last. We may also have a chain. Walk backwards over any
    // Glue and chain operands to land on the policy op itself.
    TailPolicyOpIdx = N->getNumOperands() - 1;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
      (*TailPolicyOpIdx)--;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
      (*TailPolicyOpIdx)--;

    // If the policy isn't TAIL_AGNOSTIC we can't perform this optimization.
    if (N->getConstantOperandVal(*TailPolicyOpIdx) != RISCVII::TAIL_AGNOSTIC)
      return false;
  }

  const MCInstrDesc &UnmaskedMCID = TII->get(I->UnmaskedPseudo);

  // Check that we're dropping the merge operand, the mask operand, and any
  // policy operand when we transform to this unmasked pseudo.
  assert(!RISCVII::hasMergeOp(UnmaskedMCID.TSFlags) &&
         RISCVII::hasDummyMaskOp(UnmaskedMCID.TSFlags) &&
         !RISCVII::hasVecPolicyOp(UnmaskedMCID.TSFlags) &&
         "Unexpected pseudo to transform to");
  (void)UnmaskedMCID;

  // Build the unmasked operand list from the masked node's operands.
  SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0.
  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
    // Skip the mask, the policy, and the Glue.
    SDValue Op = N->getOperand(I);
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
      continue;
    Ops.push_back(Op);
  }

  // Transitively apply any node glued to our new node.
  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

  SDNode *Result =
      CurDAG->getMachineNode(I->UnmaskedPseudo, SDLoc(N), N->getVTList(), Ops);
  ReplaceUses(N, Result);

  return true;
}
2292 
2293 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2294 // for instruction scheduling.
2295 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
2296   return new RISCVDAGToDAGISel(TM);
2297 }
2298