1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #define GET_RISCVMaskedPseudosTable_IMPL
41 #include "RISCVGenSearchableTables.inc"
42 } // namespace RISCV
43 } // namespace llvm
44 
// Run target-specific DAG rewrites before instruction selection: expand
// SPLAT_VECTOR into the VL-predicated splat nodes the isel patterns expect,
// and lower SPLAT_VECTOR_SPLIT_I64_VL (an i64 splat whose scalar arrives as
// two i32 halves) into two scalar stores plus a stride-0 vector load.
void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
    // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
    if (N->getOpcode() == ISD::SPLAT_VECTOR) {
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                                       N->getOperand(0), VL);

      // Back the iterator up so the replacement cannot invalidate it, then
      // advance past N before deleting it (same technique as below).
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    // Operands: passthru, lo half, hi half, VL.
    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Passthru = N->getOperand(0);
    SDValue Lo = N->getOperand(1);
    SDValue Hi = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    // Both stores are independent of prior memory state, so each chains to
    // the entry node; a TokenFactor below joins them.
    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    // The hi half lives 4 bytes above the lo half in the 8-byte slot.
    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    // Build the stride-0 vlse: with stride X0 every element re-reads the same
    // 8-byte slot, splatting the stored i64 value.
    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain,
                     IntID,
                     Passthru,
                     StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64),
                     VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
133 
134 void RISCVDAGToDAGISel::PostprocessISelDAG() {
135   HandleSDNode Dummy(CurDAG->getRoot());
136   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
137 
138   bool MadeChange = false;
139   while (Position != CurDAG->allnodes_begin()) {
140     SDNode *N = &*--Position;
141     // Skip dead nodes and any non-machine opcodes.
142     if (N->use_empty() || !N->isMachineOpcode())
143       continue;
144 
145     MadeChange |= doPeepholeSExtW(N);
146     MadeChange |= doPeepholeLoadStoreADDI(N);
147     MadeChange |= doPeepholeMaskedRVV(N);
148   }
149 
150   CurDAG->setRoot(Dummy.getValue());
151 
152   if (MadeChange)
153     CurDAG->RemoveDeadNodes();
154 }
155 
156 static SDNode *selectImmWithConstantPool(SelectionDAG *CurDAG, const SDLoc &DL,
157                                          const MVT VT, int64_t Imm,
158                                          const RISCVSubtarget &Subtarget) {
159   assert(VT == MVT::i64 && "Expecting MVT::i64");
160   const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
161   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(CurDAG->getConstantPool(
162       ConstantInt::get(EVT(VT).getTypeForEVT(*CurDAG->getContext()), Imm), VT));
163   SDValue Addr = TLI->getAddr(CP, *CurDAG);
164   SDValue Offset = CurDAG->getTargetConstant(0, DL, VT);
165   // Since there is no data race, the chain can be the entry node.
166   SDNode *Load = CurDAG->getMachineNode(RISCV::LD, DL, VT, Addr, Offset,
167                                         CurDAG->getEntryNode());
168   MachineFunction &MF = CurDAG->getMachineFunction();
169   MachineMemOperand *MemOp = MF.getMachineMemOperand(
170       MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
171       LLT(VT), CP->getAlign());
172   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Load), {MemOp});
173   return Load;
174 }
175 
176 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
177                          int64_t Imm, const RISCVSubtarget &Subtarget) {
178   MVT XLenVT = Subtarget.getXLenVT();
179   RISCVMatInt::InstSeq Seq =
180       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
181 
182   // If Imm is expensive to build, then we put it into constant pool.
183   if (Subtarget.useConstantPoolForLargeInts() &&
184       Seq.size() > Subtarget.getMaxBuildIntsCost())
185     return selectImmWithConstantPool(CurDAG, DL, VT, Imm, Subtarget);
186 
187   SDNode *Result = nullptr;
188   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
189   for (RISCVMatInt::Inst &Inst : Seq) {
190     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
191     if (Inst.Opc == RISCV::LUI)
192       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
193     else if (Inst.Opc == RISCV::ADD_UW)
194       Result = CurDAG->getMachineNode(RISCV::ADD_UW, DL, XLenVT, SrcReg,
195                                       CurDAG->getRegister(RISCV::X0, XLenVT));
196     else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
197              Inst.Opc == RISCV::SH3ADD)
198       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
199     else
200       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
201 
202     // Only the first instruction has X0 as its source.
203     SrcReg = SDValue(Result, 0);
204   }
205 
206   return Result;
207 }
208 
209 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
210                                unsigned RegClassID, unsigned SubReg0) {
211   assert(Regs.size() >= 2 && Regs.size() <= 8);
212 
213   SDLoc DL(Regs[0]);
214   SmallVector<SDValue, 8> Ops;
215 
216   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
217 
218   for (unsigned I = 0; I < Regs.size(); ++I) {
219     Ops.push_back(Regs[I]);
220     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
221   }
222   SDNode *N =
223       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
224   return SDValue(N, 0);
225 }
226 
227 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
228                              unsigned NF) {
229   static const unsigned RegClassIDs[] = {
230       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
231       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
232       RISCV::VRN8M1RegClassID};
233 
234   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
235 }
236 
237 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
238                              unsigned NF) {
239   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
240                                          RISCV::VRN3M2RegClassID,
241                                          RISCV::VRN4M2RegClassID};
242 
243   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
244 }
245 
246 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
247                              unsigned NF) {
248   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
249                          RISCV::sub_vrm4_0);
250 }
251 
252 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
253                            unsigned NF, RISCVII::VLMUL LMUL) {
254   switch (LMUL) {
255   default:
256     llvm_unreachable("Invalid LMUL.");
257   case RISCVII::VLMUL::LMUL_F8:
258   case RISCVII::VLMUL::LMUL_F4:
259   case RISCVII::VLMUL::LMUL_F2:
260   case RISCVII::VLMUL::LMUL_1:
261     return createM1Tuple(CurDAG, Regs, NF);
262   case RISCVII::VLMUL::LMUL_2:
263     return createM2Tuple(CurDAG, Regs, NF);
264   case RISCVII::VLMUL::LMUL_4:
265     return createM4Tuple(CurDAG, Regs, NF);
266   }
267 }
268 
// Append the common trailing operands of an RVV load/store pseudo to
// Operands, consuming Node's operands starting at CurOp. The order produced
// is: base pointer, [stride or index], [mask register V0], VL, SEW,
// [policy for masked loads], chain, [glue from the mask copy]. If IndexVT is
// non-null it receives the value type of the stride/index operand.
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0. The copy extends the chain and produces
    // glue so the pseudo is tied directly to the copy.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  // SEW is encoded as its log2.
  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
313 
314 static bool isAllUndef(ArrayRef<SDValue> Values) {
315   return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
316 }
317 
// Select an RVV segment-load intrinsic (strided when IsStrided) into the
// matching pseudo. The pseudo yields one tuple register which is split back
// into the NF individual results with extract_subreg.
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  // The NF merge (passthru) operands follow the intrinsic id.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Use a TU pseudo when the operation is masked or any passthru is defined.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Transfer the memory operand info from the intrinsic node if present.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
360 
// Select an RVV fault-only-first segment-load intrinsic into the matching
// pseudo. Besides the NF segment values and the chain, the node also
// produces the resulting VL, which is recovered with a PseudoReadVL glued to
// the load.
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned SEW = VT.getScalarSizeInBits();
  unsigned Log2SEW = Log2_32(SEW);
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;

  // The NF merge (passthru) operands follow the intrinsic id.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Use a TU pseudo when the operation is masked or any passthru is defined.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
  // Encode the VTYPE operand for PseudoReadVL. Masked forms carry an explicit
  // policy operand at the end; otherwise default to tail-agnostic with the
  // mask policy unset.
  bool TailAgnostic = true;
  bool MaskAgnostic = false;
  if (IsMasked) {
    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
  }
  unsigned VType =
      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
  // Read the VL produced by the load; the glue ties the read to the load.
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          VTypeOp, /*Glue*/ SDValue(Load, 2));

  // Transfer the memory operand info from the intrinsic node if present.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}
418 
// Select an RVV indexed segment-load intrinsic (ordered when IsOrdered) into
// the matching pseudo. The index operand determines a separate EEW/LMUL used
// to pick the pseudo.
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Results are the NF segment values plus the chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  // Operand 0 is the chain, operand 1 the intrinsic id.
  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  // The NF merge (passthru) operands follow the intrinsic id.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Use a TU pseudo when the operation is masked or any passthru is defined.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  // 64-bit index elements are not available on RV32.
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Transfer the memory operand info from the intrinsic node if present.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
472 
// Select an RVV segment-store intrinsic (strided when IsStrided) into the
// matching pseudo. The NF store values are combined into one tuple register.
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // NF is the operand count minus the fixed operands (chain, intrinsic id,
  // base, VL) and minus the optional stride and mask operands.
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  // Operand 2 is the first store value; all NF values share its type.
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  // Combine the NF store values into a single tuple register.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Transfer the memory operand info from the intrinsic node if present.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
504 
// Select an RVV indexed segment-store intrinsic (ordered when IsOrdered)
// into the matching pseudo. The index operand determines a separate EEW/LMUL
// used to pick the pseudo.
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // NF is the operand count minus the fixed operands (chain, intrinsic id,
  // base, index, VL) and minus the optional mask operand.
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  // Operand 2 is the first store value; all NF values share its type.
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  // Combine the NF store values into a single tuple register.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  // 64-bit index elements are not available on RV32.
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Transfer the memory operand info from the intrinsic node if present.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
546 
// Select the riscv.vsetvli / riscv.vsetvlimax intrinsics (and their _opt
// variants) into vsetvli pseudos: PseudoVSETVLIX0 for the VLMAX forms,
// PseudoVSETIVLI when the AVL is a small constant, PseudoVSETVLI otherwise.
void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  // With a chain, operand 0 is the chain and the intrinsic id shifts to 1.
  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  // Offset of the SEW operand: the non-max forms carry an AVL operand before
  // it.
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  // Mask the SEW and LMUL operands down to their 3-bit encodings.
  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    // VLMAX forms use X0 as the AVL.
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    // A constant AVL that fits in 5 bits can use the immediate form.
    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
616 
617 void RISCVDAGToDAGISel::Select(SDNode *Node) {
618   // If we have a custom node, we have already selected.
619   if (Node->isMachineOpcode()) {
620     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
621     Node->setNodeId(-1);
622     return;
623   }
624 
625   // Instruction Selection not handled by the auto-generated tablegen selection
626   // should be handled here.
627   unsigned Opcode = Node->getOpcode();
628   MVT XLenVT = Subtarget->getXLenVT();
629   SDLoc DL(Node);
630   MVT VT = Node->getSimpleValueType(0);
631 
632   switch (Opcode) {
633   case ISD::Constant: {
634     auto *ConstNode = cast<ConstantSDNode>(Node);
635     if (VT == XLenVT && ConstNode->isZero()) {
636       SDValue New =
637           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
638       ReplaceNode(Node, New.getNode());
639       return;
640     }
641     int64_t Imm = ConstNode->getSExtValue();
642     // If the upper XLen-16 bits are not used, try to convert this to a simm12
643     // by sign extending bit 15.
644     if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
645         hasAllHUsers(Node))
646       Imm = SignExtend64(Imm, 16);
647     // If the upper 32-bits are not used try to convert this into a simm32 by
648     // sign extending bit 32.
649     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
650       Imm = SignExtend64(Imm, 32);
651 
652     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
653     return;
654   }
655   case ISD::FrameIndex: {
656     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
657     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
658     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
659     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
660     return;
661   }
662   case ISD::SRL: {
663     // Optimize (srl (and X, C2), C) ->
664     //          (srli (slli X, (XLen-C3), (XLen-C3) + C)
665     // Where C2 is a mask with C3 trailing ones.
666     // Taking into account that the C2 may have had lower bits unset by
667     // SimplifyDemandedBits. This avoids materializing the C2 immediate.
668     // This pattern occurs when type legalizing right shifts for types with
669     // less than XLen bits.
670     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
671     if (!N1C)
672       break;
673     SDValue N0 = Node->getOperand(0);
674     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
675         !isa<ConstantSDNode>(N0.getOperand(1)))
676       break;
677     unsigned ShAmt = N1C->getZExtValue();
678     uint64_t Mask = N0.getConstantOperandVal(1);
679     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
680     if (!isMask_64(Mask))
681       break;
682     unsigned TrailingOnes = countTrailingOnes(Mask);
683     // 32 trailing ones should use srliw via tablegen pattern.
684     if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
685       break;
686     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
687     SDNode *SLLI =
688         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
689                                CurDAG->getTargetConstant(LShAmt, DL, VT));
690     SDNode *SRLI = CurDAG->getMachineNode(
691         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
692         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
693     ReplaceNode(Node, SRLI);
694     return;
695   }
696   case ISD::SRA: {
697     // Optimize (sra (sext_inreg X, i16), C) ->
698     //          (srai (slli X, (XLen-16), (XLen-16) + C)
699     // And      (sra (sext_inreg X, i8), C) ->
700     //          (srai (slli X, (XLen-8), (XLen-8) + C)
701     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
702     // This transform matches the code we get without Zbb. The shifts are more
703     // compressible, and this can help expose CSE opportunities in the sdiv by
704     // constant optimization.
705     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
706     if (!N1C)
707       break;
708     SDValue N0 = Node->getOperand(0);
709     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
710       break;
711     unsigned ShAmt = N1C->getZExtValue();
712     unsigned ExtSize =
713         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
714     // ExtSize of 32 should use sraiw via tablegen pattern.
715     if (ExtSize >= 32 || ShAmt >= ExtSize)
716       break;
717     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
718     SDNode *SLLI =
719         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
720                                CurDAG->getTargetConstant(LShAmt, DL, VT));
721     SDNode *SRAI = CurDAG->getMachineNode(
722         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
723         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
724     ReplaceNode(Node, SRAI);
725     return;
726   }
  case ISD::AND: {
    // (and (shl/srl X, C2), C1) with constant C1: try to rewrite the
    // shift+mask as a pair of shift-immediates (or a single *W-form shift)
    // so that C1 never has to be materialized in a register.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    // The inner shift amount must also be a constant, nonzero, and in range.
    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is a c.andi. If we can't use c.andi, the
    // shift pair might offer more compression opportunities.
    // TODO: We could check for C extension here, but we don't have many lit
    // tests with the C extension enabled so not checking gets better coverage.
    // TODO: What if ANDI is faster than the shift?
    bool IsCANDI = isInt<6>(N1C->getSExtValue());

    // Clear irrelevant bits in the mask: bits the shift already zeroed
    // contribute nothing to the AND.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32)
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    // The value being shifted.
    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      // Leading zero count of C1 measured in XLen bits.
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, X.getOperand(0),
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        // Skip if we could use (zext.w (sraiw X, C2)).
        bool Skip = Subtarget->hasStdExtZba() && C3 == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        // Also Skip if we can use bexti.
        Skip |= Subtarget->hasStdExtZbs() && C3 == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      // Require the mask's trailing-zero count to exactly match the shift
      // amount C2, i.e. C1 == ((1 << (XLen-C2-C3)) - 1) << C2.
      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLI_UW =
              CurDAG->getMachineNode(RISCV::SLLI_UW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLI_UW);
          return;
        }

        // (srli (slli c2+c3), c3)
        if (OneUseOrZExtW && !IsCANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
    if (!LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsCANDI) {
        unsigned SrliOpc = RISCV::SRLI;
        // If the input is zexti32 we should use SRLIW.
        if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          // SRLIW implicitly zero-extends from bit 31, so the masking AND
          // can be absorbed.
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);
        }
        SDNode *SRLI = CurDAG->getMachineNode(
            SrliOpc, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
          OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // No profitable shift-pair pattern matched; fall through to the
    // tablegen-generated patterns.
    break;
  }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // This should be the only use of the AND unless we will use
    // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
    // constants.
    if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
    // optimization.
    if (isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    // Compare against the leading-zero count of C1 (in XLen bits); shifting
    // further would lose significant bits of the constant.
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant so selectImm sees
    // a canonical 32-bit value.
    if (XLen == 32)
      ShiftedC1 = SignExtend64(ShiftedC1, 32);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    // Chainless intrinsics. Only vmsge/vmsgeu (and their masked forms) need
    // custom selection: there is no native vmsge{u}.vx instruction, so they
    // are expanded in terms of vmslt{u}.vx plus mask logic.
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          // vmsgeu with immediate 0 is always true, so it is expanded to
          // vmset below rather than handled by a pattern.
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      // Pick the pseudo opcodes matching the source LMUL.
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset.
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          // Masked vmsgeu with immediate 0 is expanded below instead of
          // being matched by a pattern.
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      // Comparison opcodes use the LMUL of the compared vector type.
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
        CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      // Mask-register operations always use SEW=8 (log2 == 0).
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      // The result is mask undisturbed.
      // We use the same instructions to emulate mask agnostic behavior, because
      // the agnostic result can be either undisturbed or all 1.
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      // vmxor.mm vd, vd, v0 is used to update active value.
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    // Chained intrinsics: vsetvli and the vector load families. Segment
    // loads are delegated to the selectVLSEG* helpers; the unit-stride,
    // strided, indexed and fault-only-first loads are selected inline below.
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      // Indexed loads: vloxei (ordered) / vluxei (unordered).
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      // Operand 0 is the chain, operand 1 the intrinsic ID; the passthru
      // (if any) starts at operand 2.
      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/true, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      // Look up the pseudo in the tablegen-generated table.
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      // Preserve the memory operand for alias analysis/scheduling.
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      // Unit-stride (vle/vlm) and strided (vlse) loads.
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU =
          HasPassthruOperand &&
          ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands, /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      // Preserve the memory operand for alias analysis/scheduling.
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      // Fault-only-first load. Produces an extra result: the updated vl,
      // which is recovered below with PseudoReadVL.
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned SEW = VT.getScalarSizeInBits();
      unsigned Log2SEW = Log2_32(SEW);

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 7> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands,
                                 /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
      // Results: loaded value, chain, glue (the glue ties PseudoReadVL to
      // this load).
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
                                 MVT::Other, MVT::Glue, Operands);
      bool TailAgnostic = !IsTU;
      bool MaskAgnostic = false;
      if (IsMasked) {
        // The masked form carries an explicit tail/mask policy as its last
        // operand.
        uint64_t Policy =
            Node->getConstantOperandVal(Node->getNumOperands() - 1);
        TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
        MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
      }
      unsigned VType =
          RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
      SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
      // Read the vl CSR that the fault-only-first load may have trimmed.
      SDNode *ReadVL =
          CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
                                 /*Glue*/ SDValue(Load, 2));

      // Preserve the memory operand for alias analysis/scheduling.
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    }
    break;
  }
1417   case ISD::INTRINSIC_VOID: {
1418     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1419     switch (IntNo) {
1420     case Intrinsic::riscv_vsseg2:
1421     case Intrinsic::riscv_vsseg3:
1422     case Intrinsic::riscv_vsseg4:
1423     case Intrinsic::riscv_vsseg5:
1424     case Intrinsic::riscv_vsseg6:
1425     case Intrinsic::riscv_vsseg7:
1426     case Intrinsic::riscv_vsseg8: {
1427       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1428       return;
1429     }
1430     case Intrinsic::riscv_vsseg2_mask:
1431     case Intrinsic::riscv_vsseg3_mask:
1432     case Intrinsic::riscv_vsseg4_mask:
1433     case Intrinsic::riscv_vsseg5_mask:
1434     case Intrinsic::riscv_vsseg6_mask:
1435     case Intrinsic::riscv_vsseg7_mask:
1436     case Intrinsic::riscv_vsseg8_mask: {
1437       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1438       return;
1439     }
1440     case Intrinsic::riscv_vssseg2:
1441     case Intrinsic::riscv_vssseg3:
1442     case Intrinsic::riscv_vssseg4:
1443     case Intrinsic::riscv_vssseg5:
1444     case Intrinsic::riscv_vssseg6:
1445     case Intrinsic::riscv_vssseg7:
1446     case Intrinsic::riscv_vssseg8: {
1447       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1448       return;
1449     }
1450     case Intrinsic::riscv_vssseg2_mask:
1451     case Intrinsic::riscv_vssseg3_mask:
1452     case Intrinsic::riscv_vssseg4_mask:
1453     case Intrinsic::riscv_vssseg5_mask:
1454     case Intrinsic::riscv_vssseg6_mask:
1455     case Intrinsic::riscv_vssseg7_mask:
1456     case Intrinsic::riscv_vssseg8_mask: {
1457       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1458       return;
1459     }
1460     case Intrinsic::riscv_vsoxseg2:
1461     case Intrinsic::riscv_vsoxseg3:
1462     case Intrinsic::riscv_vsoxseg4:
1463     case Intrinsic::riscv_vsoxseg5:
1464     case Intrinsic::riscv_vsoxseg6:
1465     case Intrinsic::riscv_vsoxseg7:
1466     case Intrinsic::riscv_vsoxseg8:
1467       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1468       return;
1469     case Intrinsic::riscv_vsuxseg2:
1470     case Intrinsic::riscv_vsuxseg3:
1471     case Intrinsic::riscv_vsuxseg4:
1472     case Intrinsic::riscv_vsuxseg5:
1473     case Intrinsic::riscv_vsuxseg6:
1474     case Intrinsic::riscv_vsuxseg7:
1475     case Intrinsic::riscv_vsuxseg8:
1476       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1477       return;
1478     case Intrinsic::riscv_vsoxseg2_mask:
1479     case Intrinsic::riscv_vsoxseg3_mask:
1480     case Intrinsic::riscv_vsoxseg4_mask:
1481     case Intrinsic::riscv_vsoxseg5_mask:
1482     case Intrinsic::riscv_vsoxseg6_mask:
1483     case Intrinsic::riscv_vsoxseg7_mask:
1484     case Intrinsic::riscv_vsoxseg8_mask:
1485       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1486       return;
1487     case Intrinsic::riscv_vsuxseg2_mask:
1488     case Intrinsic::riscv_vsuxseg3_mask:
1489     case Intrinsic::riscv_vsuxseg4_mask:
1490     case Intrinsic::riscv_vsuxseg5_mask:
1491     case Intrinsic::riscv_vsuxseg6_mask:
1492     case Intrinsic::riscv_vsuxseg7_mask:
1493     case Intrinsic::riscv_vsuxseg8_mask:
1494       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1495       return;
1496     case Intrinsic::riscv_vsoxei:
1497     case Intrinsic::riscv_vsoxei_mask:
1498     case Intrinsic::riscv_vsuxei:
1499     case Intrinsic::riscv_vsuxei_mask: {
1500       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1501                       IntNo == Intrinsic::riscv_vsuxei_mask;
1502       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1503                        IntNo == Intrinsic::riscv_vsoxei_mask;
1504 
1505       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1506       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1507 
1508       unsigned CurOp = 2;
1509       SmallVector<SDValue, 8> Operands;
1510       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1511 
1512       MVT IndexVT;
1513       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1514                                  /*IsStridedOrIndexed*/ true, Operands,
1515                                  /*IsLoad=*/false, &IndexVT);
1516 
1517       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1518              "Element count mismatch");
1519 
1520       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1521       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1522       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1523       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1524         report_fatal_error("The V extension does not support EEW=64 for index "
1525                            "values when XLEN=32");
1526       }
1527       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1528           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
1529           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
1530       MachineSDNode *Store =
1531           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1532 
1533       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1534         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1535 
1536       ReplaceNode(Node, Store);
1537       return;
1538     }
1539     case Intrinsic::riscv_vsm:
1540     case Intrinsic::riscv_vse:
1541     case Intrinsic::riscv_vse_mask:
1542     case Intrinsic::riscv_vsse:
1543     case Intrinsic::riscv_vsse_mask: {
1544       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1545                       IntNo == Intrinsic::riscv_vsse_mask;
1546       bool IsStrided =
1547           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1548 
1549       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1550       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1551 
1552       unsigned CurOp = 2;
1553       SmallVector<SDValue, 8> Operands;
1554       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1555 
1556       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1557                                  Operands);
1558 
1559       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1560       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1561           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1562       MachineSDNode *Store =
1563           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1564       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1565         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1566 
1567       ReplaceNode(Node, Store);
1568       return;
1569     }
1570     }
1571     break;
1572   }
1573   case ISD::BITCAST: {
1574     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1575     // Just drop bitcasts between vectors if both are fixed or both are
1576     // scalable.
1577     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1578         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1579       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1580       CurDAG->RemoveDeadNode(Node);
1581       return;
1582     }
1583     break;
1584   }
1585   case ISD::INSERT_SUBVECTOR: {
1586     SDValue V = Node->getOperand(0);
1587     SDValue SubV = Node->getOperand(1);
1588     SDLoc DL(SubV);
1589     auto Idx = Node->getConstantOperandVal(2);
1590     MVT SubVecVT = SubV.getSimpleValueType();
1591 
1592     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1593     MVT SubVecContainerVT = SubVecVT;
1594     // Establish the correct scalable-vector types for any fixed-length type.
1595     if (SubVecVT.isFixedLengthVector())
1596       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1597     if (VT.isFixedLengthVector())
1598       VT = TLI.getContainerForFixedLengthVector(VT);
1599 
1600     const auto *TRI = Subtarget->getRegisterInfo();
1601     unsigned SubRegIdx;
1602     std::tie(SubRegIdx, Idx) =
1603         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1604             VT, SubVecContainerVT, Idx, TRI);
1605 
1606     // If the Idx hasn't been completely eliminated then this is a subvector
1607     // insert which doesn't naturally align to a vector register. These must
1608     // be handled using instructions to manipulate the vector registers.
1609     if (Idx != 0)
1610       break;
1611 
1612     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1613     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1614                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1615                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1616     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1617     assert((!IsSubVecPartReg || V.isUndef()) &&
1618            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1619            "the subvector is smaller than a full-sized register");
1620 
1621     // If we haven't set a SubRegIdx, then we must be going between
1622     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1623     if (SubRegIdx == RISCV::NoSubRegister) {
1624       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1625       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1626                  InRegClassID &&
1627              "Unexpected subvector extraction");
1628       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1629       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1630                                                DL, VT, SubV, RC);
1631       ReplaceNode(Node, NewNode);
1632       return;
1633     }
1634 
1635     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1636     ReplaceNode(Node, Insert.getNode());
1637     return;
1638   }
1639   case ISD::EXTRACT_SUBVECTOR: {
1640     SDValue V = Node->getOperand(0);
1641     auto Idx = Node->getConstantOperandVal(1);
1642     MVT InVT = V.getSimpleValueType();
1643     SDLoc DL(V);
1644 
1645     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1646     MVT SubVecContainerVT = VT;
1647     // Establish the correct scalable-vector types for any fixed-length type.
1648     if (VT.isFixedLengthVector())
1649       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1650     if (InVT.isFixedLengthVector())
1651       InVT = TLI.getContainerForFixedLengthVector(InVT);
1652 
1653     const auto *TRI = Subtarget->getRegisterInfo();
1654     unsigned SubRegIdx;
1655     std::tie(SubRegIdx, Idx) =
1656         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1657             InVT, SubVecContainerVT, Idx, TRI);
1658 
1659     // If the Idx hasn't been completely eliminated then this is a subvector
1660     // extract which doesn't naturally align to a vector register. These must
1661     // be handled using instructions to manipulate the vector registers.
1662     if (Idx != 0)
1663       break;
1664 
1665     // If we haven't set a SubRegIdx, then we must be going between
1666     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1667     if (SubRegIdx == RISCV::NoSubRegister) {
1668       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1669       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1670                  InRegClassID &&
1671              "Unexpected subvector extraction");
1672       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1673       SDNode *NewNode =
1674           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1675       ReplaceNode(Node, NewNode);
1676       return;
1677     }
1678 
1679     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1680     ReplaceNode(Node, Extract.getNode());
1681     return;
1682   }
1683   case ISD::SPLAT_VECTOR:
1684   case RISCVISD::VMV_S_X_VL:
1685   case RISCVISD::VFMV_S_F_VL:
1686   case RISCVISD::VMV_V_X_VL:
1687   case RISCVISD::VFMV_V_F_VL: {
1688     // Try to match splat of a scalar load to a strided load with stride of x0.
1689     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1690                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1691     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1692     if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1693       break;
1694     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1695     auto *Ld = dyn_cast<LoadSDNode>(Src);
1696     if (!Ld)
1697       break;
1698     EVT MemVT = Ld->getMemoryVT();
1699     // The memory VT should be the same size as the element type.
1700     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1701       break;
1702     if (!IsProfitableToFold(Src, Node, Node) ||
1703         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1704       break;
1705 
1706     SDValue VL;
1707     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1708       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1709     else if (IsScalarMove) {
1710       // We could deal with more VL if we update the VSETVLI insert pass to
1711       // avoid introducing more VSETVLI.
1712       if (!isOneConstant(Node->getOperand(2)))
1713         break;
1714       selectVLOp(Node->getOperand(2), VL);
1715     } else
1716       selectVLOp(Node->getOperand(2), VL);
1717 
1718     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1719     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1720 
1721     SDValue Operands[] = {Ld->getBasePtr(),
1722                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1723                           Ld->getChain()};
1724 
1725     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1726     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1727         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1728         Log2SEW, static_cast<unsigned>(LMUL));
1729     MachineSDNode *Load =
1730         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1731 
1732     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1733 
1734     ReplaceNode(Node, Load);
1735     return;
1736   }
1737   }
1738 
1739   // Select the default instruction.
1740   SelectCode(Node);
1741 }
1742 
1743 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1744     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1745   switch (ConstraintID) {
1746   case InlineAsm::Constraint_m:
1747     // We just support simple memory operands that have a single address
1748     // operand and need no special handling.
1749     OutOps.push_back(Op);
1750     return false;
1751   case InlineAsm::Constraint_A:
1752     OutOps.push_back(Op);
1753     return false;
1754   default:
1755     break;
1756   }
1757 
1758   return true;
1759 }
1760 
1761 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1762   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1763     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1764     return true;
1765   }
1766   return false;
1767 }
1768 
1769 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1770   // If this is FrameIndex, select it directly. Otherwise just let it get
1771   // selected to a register independently.
1772   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1773     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1774   else
1775     Base = Addr;
1776   return true;
1777 }
1778 
1779 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1780                                         SDValue &ShAmt) {
1781   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1782   // amount. If there is an AND on the shift amount, we can bypass it if it
1783   // doesn't affect any of those bits.
1784   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1785     const APInt &AndMask = N->getConstantOperandAPInt(1);
1786 
1787     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1788     // mask that covers the bits needed to represent all shift amounts.
1789     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1790     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1791 
1792     if (ShMask.isSubsetOf(AndMask)) {
1793       ShAmt = N.getOperand(0);
1794       return true;
1795     }
1796 
1797     // SimplifyDemandedBits may have optimized the mask so try restoring any
1798     // bits that are known zero.
1799     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1800     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1801       ShAmt = N.getOperand(0);
1802       return true;
1803     }
1804   } else if (N.getOpcode() == ISD::SUB &&
1805              isa<ConstantSDNode>(N.getOperand(0))) {
1806     uint64_t Imm = N.getConstantOperandVal(0);
1807     // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
1808     // generate a NEG instead of a SUB of a constant.
1809     if (Imm != 0 && Imm % ShiftWidth == 0) {
1810       SDLoc DL(N);
1811       EVT VT = N.getValueType();
1812       SDValue Zero =
1813           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
1814       unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
1815       MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
1816                                                   N.getOperand(1));
1817       ShAmt = SDValue(Neg, 0);
1818       return true;
1819     }
1820   }
1821 
1822   ShAmt = N;
1823   return true;
1824 }
1825 
1826 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1827   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1828       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1829     Val = N.getOperand(0);
1830     return true;
1831   }
1832   MVT VT = N.getSimpleValueType();
1833   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1834     Val = N;
1835     return true;
1836   }
1837 
1838   return false;
1839 }
1840 
1841 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1842   if (N.getOpcode() == ISD::AND) {
1843     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1844     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1845       Val = N.getOperand(0);
1846       return true;
1847     }
1848   }
1849   MVT VT = N.getSimpleValueType();
1850   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1851   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1852     Val = N;
1853     return true;
1854   }
1855 
1856   return false;
1857 }
1858 
1859 // Return true if all users of this SDNode* only consume the lower \p Bits.
1860 // This can be used to form W instructions for add/sub/mul/shl even when the
1861 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1862 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1863 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
1864 // the add/sub/mul/shl to become non-W instructions. By checking the users we
1865 // may be able to use a W instruction and CSE with the other instruction if
1866 // this has happened. We could try to detect that the CSE opportunity exists
1867 // before doing this, but that would be more complicated.
1868 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1869 // opportunities.
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
  // Callers only query the opcodes this routine is designed for (plus
  // constants); anything else indicates a caller bug.
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
          Node->getOpcode() == ISD::SRL ||
          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
          Node->getOpcode() == RISCVISD::GREV ||
          Node->getOpcode() == RISCVISD::GORC ||
          isa<ConstantSDNode>(Node)) &&
         "Unexpected opcode");

  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    // Users of this node should have already been instruction selected
    if (!User->isMachineOpcode())
      return false;

    // TODO: Add more opcodes?
    switch (User->getMachineOpcode()) {
    default:
      // Unknown user: conservatively assume it reads every bit.
      return false;
    // These users consume at most the low 32 bits of their input.
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::MULW:
    case RISCV::SLLW:
    case RISCV::SLLIW:
    case RISCV::SRAW:
    case RISCV::SRAIW:
    case RISCV::SRLW:
    case RISCV::SRLIW:
    case RISCV::DIVW:
    case RISCV::DIVUW:
    case RISCV::REMW:
    case RISCV::REMUW:
    case RISCV::ROLW:
    case RISCV::RORW:
    case RISCV::RORIW:
    case RISCV::CLZW:
    case RISCV::CTZW:
    case RISCV::CPOPW:
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
      if (Bits < 32)
        return false;
      break;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
        return false;
      break;
    case RISCV::ANDI:
      // ANDI only reads as many low bits as its immediate mask occupies
      // (64 - countLeadingZeros gives the mask's active bit width).
      if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
        return false;
      break;
    case RISCV::SEXT_B:
      // sext.b only reads the low 8 bits.
      if (Bits < 8)
        return false;
      break;
    // These users only read the low 16 bits.
    case RISCV::SEXT_H:
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      if (Bits < 16)
        return false;
      break;
    case RISCV::ADD_UW:
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
      // 32 bits.
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    // Stores only read the low bits of the stored value (operand 0); the
    // address operand needs all its bits.
    case RISCV::SB:
      if (UI.getOperandNo() != 0 || Bits < 8)
        return false;
      break;
    case RISCV::SH:
      if (UI.getOperandNo() != 0 || Bits < 16)
        return false;
      break;
    case RISCV::SW:
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    }
  }

  return true;
}
1967 
1968 // Select VL as a 5 bit immediate or a value that will become a register. This
1969 // allows us to choose betwen VSETIVLI or VSETVLI later.
1970 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1971   auto *C = dyn_cast<ConstantSDNode>(N);
1972   if (C && isUInt<5>(C->getZExtValue())) {
1973     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1974                                    N->getValueType(0));
1975   } else if (C && C->isAllOnesValue()) {
1976     // Treat all ones as VLMax.
1977     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1978                                    N->getValueType(0));
1979   } else if (isa<RegisterSDNode>(N) &&
1980              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
1981     // All our VL operands use an operand that allows GPRNoX0 or an immediate
1982     // as the register class. Convert X0 to a special immediate to pass the
1983     // MachineVerifier. This is recognized specially by the vsetvli insertion
1984     // pass.
1985     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
1986                                    N->getValueType(0));
1987   } else {
1988     VL = N;
1989   }
1990 
1991   return true;
1992 }
1993 
1994 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1995   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
1996     return false;
1997   SplatVal = N.getOperand(1);
1998   return true;
1999 }
2000 
2001 using ValidateFn = bool (*)(int64_t);
2002 
2003 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2004                                    SelectionDAG &DAG,
2005                                    const RISCVSubtarget &Subtarget,
2006                                    ValidateFn ValidateImm) {
2007   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2008       !isa<ConstantSDNode>(N.getOperand(1)))
2009     return false;
2010 
2011   int64_t SplatImm =
2012       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2013 
2014   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
2015   // type is wider than the resulting vector element type: an implicit
2016   // truncation first takes place. Therefore, perform a manual
2017   // truncation/sign-extension in order to ignore any truncated bits and catch
2018   // any zero-extended immediate.
2019   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2020   // sign-extending to (XLenVT -1).
2021   MVT XLenVT = Subtarget.getXLenVT();
2022   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2023          "Unexpected splat operand type");
2024   MVT EltVT = N.getSimpleValueType().getVectorElementType();
2025   if (EltVT.bitsLT(XLenVT))
2026     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2027 
2028   if (!ValidateImm(SplatImm))
2029     return false;
2030 
2031   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2032   return true;
2033 }
2034 
2035 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2036   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2037                                 [](int64_t Imm) { return isInt<5>(Imm); });
2038 }
2039 
2040 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2041   return selectVSplatSimmHelper(
2042       N, SplatVal, *CurDAG, *Subtarget,
2043       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2044 }
2045 
2046 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2047                                                       SDValue &SplatVal) {
2048   return selectVSplatSimmHelper(
2049       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2050         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2051       });
2052 }
2053 
2054 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2055   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2056       !isa<ConstantSDNode>(N.getOperand(1)))
2057     return false;
2058 
2059   int64_t SplatImm =
2060       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2061 
2062   if (!isUInt<5>(SplatImm))
2063     return false;
2064 
2065   SplatVal =
2066       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2067 
2068   return true;
2069 }
2070 
2071 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2072                                        SDValue &Imm) {
2073   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2074     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2075 
2076     if (!isInt<5>(ImmVal))
2077       return false;
2078 
2079     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2080     return true;
2081   }
2082 
2083   return false;
2084 }
2085 
2086 // Merge an ADDI into the offset of a load/store instruction where possible.
2087 // (load (addi base, off1), off2) -> (load base, off1+off2)
2088 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
2089 // (load (add base, (addi src, off1)), off2)
2090 //    -> (load (add base, src), off1+off2)
2091 // (store val, (add base, (addi src, off1)), off2)
2092 //    -> (store val, (add base, src), off1+off2)
2093 // This is possible when off1+off2 fits a 12-bit immediate.
bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
  int OffsetOpIdx;
  int BaseOpIdx;

  // Only attempt this optimisation for I-type loads and S-type stores.
  switch (N->getMachineOpcode()) {
  default:
    return false;
  case RISCV::LB:
  case RISCV::LH:
  case RISCV::LW:
  case RISCV::LBU:
  case RISCV::LHU:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLH:
  case RISCV::FLW:
  case RISCV::FLD:
    // Loads: operand 0 is the base address, operand 1 the offset.
    BaseOpIdx = 0;
    OffsetOpIdx = 1;
    break;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::SD:
  case RISCV::FSH:
  case RISCV::FSW:
  case RISCV::FSD:
    // Stores: operand 0 is the value, operand 1 the base, operand 2 the
    // offset.
    BaseOpIdx = 1;
    OffsetOpIdx = 2;
    break;
  }

  // The memory op's existing offset must be a constant to combine with.
  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
    return false;

  SDValue Base = N->getOperand(BaseOpIdx);

  // The base must already be selected so its machine opcode can be
  // inspected below.
  if (!Base.isMachineOpcode())
    return false;

  // There is a ADD between ADDI and load/store. We can only fold ADDI that
  // do not have a FrameIndex operand.
  // `Add` stays null unless an intermediate ADD is matched; `AddBaseIdx`
  // names the ADD operand that is NOT the folded ADDI/ADDIW.
  SDValue Add;
  unsigned AddBaseIdx;
  if (Base.getMachineOpcode() == RISCV::ADD && Base.hasOneUse()) {
    Add = Base;
    SDValue Op0 = Base.getOperand(0);
    SDValue Op1 = Base.getOperand(1);
    if (Op0.isMachineOpcode() && Op0.getMachineOpcode() == RISCV::ADDI &&
        !isa<FrameIndexSDNode>(Op0.getOperand(0)) &&
        isa<ConstantSDNode>(Op0.getOperand(1))) {
      // (add (addi src, off1), other) -> fold the ADDI on the left.
      AddBaseIdx = 1;
      Base = Op0;
    } else if (Op1.isMachineOpcode() && Op1.getMachineOpcode() == RISCV::ADDI &&
               !isa<FrameIndexSDNode>(Op1.getOperand(0)) &&
               isa<ConstantSDNode>(Op1.getOperand(1))) {
      // (add other, (addi src, off1)) -> fold the ADDI on the right.
      AddBaseIdx = 0;
      Base = Op1;
    } else if (Op1.isMachineOpcode() &&
               Op1.getMachineOpcode() == RISCV::ADDIW &&
               isa<ConstantSDNode>(Op1.getOperand(1)) &&
               Op1.getOperand(0).isMachineOpcode() &&
               Op1.getOperand(0).getMachineOpcode() == RISCV::LUI) {
      // We found an LUI+ADDIW constant materialization. We might be able to
      // fold the ADDIW offset if it could be treated as ADDI.
      // Emulate the constant materialization to see if the result would be
      // a simm32 if ADDI was used instead of ADDIW.

      // First the LUI.
      uint64_t Imm = Op1.getOperand(0).getConstantOperandVal(0);
      Imm <<= 12;
      Imm = SignExtend64(Imm, 32);

      // Then the ADDI.
      uint64_t LoImm = cast<ConstantSDNode>(Op1.getOperand(1))->getSExtValue();
      Imm += LoImm;

      // If the result isn't a simm32, we can't do the optimization.
      if (!isInt<32>(Imm))
        return false;

      AddBaseIdx = 0;
      Base = Op1;
    } else
      return false;
  } else if (Base.getMachineOpcode() == RISCV::ADDI) {
    // If the base is an ADDI, we can merge it in to the load/store.
  } else
    return false;

  // Base now points at the matched ADDI/ADDIW; combine its immediate with
  // the memory op's offset.
  SDValue ImmOperand = Base.getOperand(1);
  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
    int64_t Offset1 = Const->getSExtValue();
    int64_t CombinedOffset = Offset1 + Offset2;
    // The combined offset must still fit the 12-bit immediate field.
    if (!isInt<12>(CombinedOffset))
      return false;
    ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                           ImmOperand.getValueType());
  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
    // If the off1 in (addi base, off1) is a global variable's address (its
    // low part, really), then we can rely on the alignment of that variable
    // to provide a margin of safety before off1 can overflow the 12 bits.
    // Check if off2 falls within that margin; if so off1+off2 can't overflow.
    const DataLayout &DL = CurDAG->getDataLayout();
    Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = GA->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetGlobalAddress(
        GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
        CombinedOffset, GA->getTargetFlags());
  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
    // Ditto.
    Align Alignment = CP->getAlign();
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = CP->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetConstantPool(
        CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
        CombinedOffset, CP->getTargetFlags());
  } else {
    return false;
  }

  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
  LLVM_DEBUG(Base->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\nN: ");
  LLVM_DEBUG(N->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\n");

  // If there was an intermediate ADD, rewrite it to use the ADDI's source
  // register directly; the ADDI's immediate moves into the memory op below.
  if (Add)
    Add = SDValue(CurDAG->UpdateNodeOperands(Add.getNode(),
                                             Add.getOperand(AddBaseIdx),
                                             Base.getOperand(0)),
                  0);

  // Modify the offset operand of the load/store.
  if (BaseOpIdx == 0) { // Load
    if (Add)
      N = CurDAG->UpdateNodeOperands(N, Add, ImmOperand, N->getOperand(2));
    else
      N = CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                                     N->getOperand(2));
  } else { // Store
    if (Add)
      N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), Add, ImmOperand,
                                     N->getOperand(3));
    else
      N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                                     ImmOperand, N->getOperand(3));
  }

  return true;
}
2253 
2254 // Try to remove sext.w if the input is a W instruction or can be made into
2255 // a W instruction cheaply.
2256 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2257   // Look for the sext.w pattern, addiw rd, rs1, 0.
2258   if (N->getMachineOpcode() != RISCV::ADDIW ||
2259       !isNullConstant(N->getOperand(1)))
2260     return false;
2261 
2262   SDValue N0 = N->getOperand(0);
2263   if (!N0.isMachineOpcode())
2264     return false;
2265 
2266   switch (N0.getMachineOpcode()) {
2267   default:
2268     break;
2269   case RISCV::ADD:
2270   case RISCV::ADDI:
2271   case RISCV::SUB:
2272   case RISCV::MUL:
2273   case RISCV::SLLI: {
2274     // Convert sext.w+add/sub/mul to their W instructions. This will create
2275     // a new independent instruction. This improves latency.
2276     unsigned Opc;
2277     switch (N0.getMachineOpcode()) {
2278     default:
2279       llvm_unreachable("Unexpected opcode!");
2280     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2281     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2282     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2283     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2284     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2285     }
2286 
2287     SDValue N00 = N0.getOperand(0);
2288     SDValue N01 = N0.getOperand(1);
2289 
2290     // Shift amount needs to be uimm5.
2291     if (N0.getMachineOpcode() == RISCV::SLLI &&
2292         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2293       break;
2294 
2295     SDNode *Result =
2296         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2297                                N00, N01);
2298     ReplaceUses(N, Result);
2299     return true;
2300   }
2301   case RISCV::ADDW:
2302   case RISCV::ADDIW:
2303   case RISCV::SUBW:
2304   case RISCV::MULW:
2305   case RISCV::SLLIW:
2306   case RISCV::GREVIW:
2307   case RISCV::GORCIW:
2308     // Result is already sign extended just remove the sext.w.
2309     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2310     ReplaceUses(N, N0.getNode());
2311     return true;
2312   }
2313 
2314   return false;
2315 }
2316 
// Optimize masked RVV pseudo instructions with a known all-ones mask to their
// corresponding "unmasked" pseudo versions. The mask we're interested in will
// take the form of a V0 physical register operand, with a glued
// register-setting instruction.
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  // Only pseudos present in the masked-pseudo table have an unmasked
  // counterpart we can rewrite to.
  const RISCV::RISCVMaskedPseudoInfo *I =
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  if (!I)
    return false;

  unsigned MaskOpIdx = I->MaskOpIdx;

  // Check that we're using V0 as a mask register.
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
    return false;

  // The glued user defines V0.
  const auto *Glued = N->getGluedNode();

  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
    return false;

  // Check that we're defining V0 as a mask register.
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
    return false;

  // Check the instruction defining V0; it needs to be a VMSET pseudo.
  // VMSET produces an all-ones mask, which is what makes the masked op
  // equivalent to its unmasked form.
  SDValue MaskSetter = Glued->getOperand(2);

  // One PseudoVMSET pseudo exists per mask element width (B1..B64).
  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
  // undefined behaviour if it's the wrong bitwidth, so we could choose to
  // assume that it's all-ones? Same applies to its VL.
  if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
    return false;

  // Retrieve the tail policy operand index, if any.
  Optional<unsigned> TailPolicyOpIdx;
  const RISCVInstrInfo *TII = static_cast<const RISCVInstrInfo *>(
      CurDAG->getSubtarget().getInstrInfo());

  const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());

  // Default to the tail-agnostic (TA) unmasked pseudo unless the policy
  // operand and merge operand force tail-undisturbed (TU) below.
  bool IsTA = true;
  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
    // The last operand of the pseudo is the policy op, but we might have a
    // Glue operand last. We might also have a chain.
    // Walk backwards past an optional Glue and an optional Chain operand.
    TailPolicyOpIdx = N->getNumOperands() - 1;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
      (*TailPolicyOpIdx)--;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
      (*TailPolicyOpIdx)--;

    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
          RISCVII::TAIL_AGNOSTIC)) {
      // Keep the true-masked instruction when there is no unmasked TU
      // instruction
      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
        return false;
      // We can't use TA if the tie-operand is not IMPLICIT_DEF
      if (!N->getOperand(0).isUndef())
        IsTA = false;
    }
  }

  if (IsTA) {
    uint64_t TSFlags = TII->get(I->UnmaskedPseudo).TSFlags;

    // Check that we're dropping the merge operand, the mask operand, and any
    // policy operand when we transform to this unmasked pseudo.
    assert(!RISCVII::hasMergeOp(TSFlags) && RISCVII::hasDummyMaskOp(TSFlags) &&
           !RISCVII::hasVecPolicyOp(TSFlags) &&
           "Unexpected pseudo to transform to");
    (void)TSFlags;
  } else {
    uint64_t TSFlags = TII->get(I->UnmaskedTUPseudo).TSFlags;

    // Check that we're dropping the mask operand, and any policy operand
    // when we transform to this unmasked tu pseudo.
    assert(RISCVII::hasMergeOp(TSFlags) && RISCVII::hasDummyMaskOp(TSFlags) &&
           !RISCVII::hasVecPolicyOp(TSFlags) &&
           "Unexpected pseudo to transform to");
    (void)TSFlags;
  }

  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
  SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0 if IsTA
  // NOTE(review): the loop counter `I` shadows the table-entry pointer `I`
  // above; `Opc` was already computed, so this is benign but fragile.
  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
    // Skip the mask, the policy, and the Glue.
    SDValue Op = N->getOperand(I);
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
      continue;
    Ops.push_back(Op);
  }

  // Transitively apply any node glued to our new node.
  // The glue result is always the last value produced by the glued node.
  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
  ReplaceUses(N, Result);

  return true;
}
2431 
2432 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2433 // for instruction scheduling.
2434 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
2435   return new RISCVDAGToDAGISel(TM);
2436 }
2437